diff --git a/Package.swift b/Package.swift index a469afdfc1..f3132390b2 100644 --- a/Package.swift +++ b/Package.swift @@ -276,7 +276,6 @@ let package = Package( .library(name: "SotoMigrationHubOrchestrator", targets: ["SotoMigrationHubOrchestrator"]), .library(name: "SotoMigrationHubRefactorSpaces", targets: ["SotoMigrationHubRefactorSpaces"]), .library(name: "SotoMigrationHubStrategy", targets: ["SotoMigrationHubStrategy"]), - .library(name: "SotoMobile", targets: ["SotoMobile"]), .library(name: "SotoNeptune", targets: ["SotoNeptune"]), .library(name: "SotoNeptuneGraph", targets: ["SotoNeptuneGraph"]), .library(name: "SotoNeptunedata", targets: ["SotoNeptunedata"]), @@ -352,6 +351,7 @@ let package = Package( .library(name: "SotoSSM", targets: ["SotoSSM"]), .library(name: "SotoSSMContacts", targets: ["SotoSSMContacts"]), .library(name: "SotoSSMIncidents", targets: ["SotoSSMIncidents"]), + .library(name: "SotoSSMQuickSetup", targets: ["SotoSSMQuickSetup"]), .library(name: "SotoSSO", targets: ["SotoSSO"]), .library(name: "SotoSSOAdmin", targets: ["SotoSSOAdmin"]), .library(name: "SotoSSOOIDC", targets: ["SotoSSOOIDC"]), @@ -666,7 +666,6 @@ let package = Package( .target(name: "SotoMigrationHubOrchestrator", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MigrationHubOrchestrator", swiftSettings: swiftSettings), .target(name: "SotoMigrationHubRefactorSpaces", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MigrationHubRefactorSpaces", swiftSettings: swiftSettings), .target(name: "SotoMigrationHubStrategy", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MigrationHubStrategy", swiftSettings: swiftSettings), - .target(name: "SotoMobile", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Mobile", swiftSettings: swiftSettings), .target(name: "SotoNeptune", dependencies: 
[.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Neptune", swiftSettings: swiftSettings), .target(name: "SotoNeptuneGraph", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/NeptuneGraph", swiftSettings: swiftSettings), .target(name: "SotoNeptunedata", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Neptunedata", swiftSettings: swiftSettings), @@ -742,6 +741,7 @@ let package = Package( .target(name: "SotoSSM", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSM", swiftSettings: swiftSettings), .target(name: "SotoSSMContacts", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSMContacts", swiftSettings: swiftSettings), .target(name: "SotoSSMIncidents", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSMIncidents", swiftSettings: swiftSettings), + .target(name: "SotoSSMQuickSetup", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSMQuickSetup", swiftSettings: swiftSettings), .target(name: "SotoSSO", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSO", swiftSettings: swiftSettings), .target(name: "SotoSSOAdmin", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSOAdmin", swiftSettings: swiftSettings), .target(name: "SotoSSOOIDC", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSOOIDC", swiftSettings: swiftSettings), diff --git a/Sources/Soto/Services/Amplify/Amplify_shapes.swift b/Sources/Soto/Services/Amplify/Amplify_shapes.swift index ced72f282a..517209f3c6 100644 --- a/Sources/Soto/Services/Amplify/Amplify_shapes.swift +++ b/Sources/Soto/Services/Amplify/Amplify_shapes.swift @@ -26,6 +26,12 @@ import 
Foundation extension Amplify { // MARK: Enums + public enum CacheConfigType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case amplifyManaged = "AMPLIFY_MANAGED" + case amplifyManagedNoCookies = "AMPLIFY_MANAGED_NO_COOKIES" + public var description: String { return self.rawValue } + } + public enum CertificateType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case amplifyManaged = "AMPLIFY_MANAGED" case custom = "CUSTOM" @@ -114,6 +120,8 @@ extension Amplify { public let basicAuthCredentials: String? /// Describes the content of the build specification (build spec) for the Amplify app. public let buildSpec: String? + /// The cache configuration for the Amplify app. If you don't specify the cache configuration type, Amplify uses the default AMPLIFY_MANAGED setting. + public let cacheConfig: CacheConfig? /// Creates a date and time for the Amplify app. public let createTime: Date /// Describes the custom HTTP headers for the Amplify app. @@ -151,13 +159,14 @@ extension Amplify { /// Updates the date and time for the Amplify app. public let updateTime: Date - public init(appArn: String, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, createTime: Date, customHeaders: String? = nil, customRules: [CustomRule]? = nil, defaultDomain: String, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool, enableBranchAutoBuild: Bool, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String, platform: Platform, productionBranch: ProductionBranch? = nil, repository: String? = nil, repositoryCloneMethod: RepositoryCloneMethod? = nil, tags: [String: String]? = nil, updateTime: Date) { + public init(appArn: String, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? 
= nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, cacheConfig: CacheConfig? = nil, createTime: Date, customHeaders: String? = nil, customRules: [CustomRule]? = nil, defaultDomain: String, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool, enableBranchAutoBuild: Bool, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String, platform: Platform, productionBranch: ProductionBranch? = nil, repository: String? = nil, repositoryCloneMethod: RepositoryCloneMethod? = nil, tags: [String: String]? = nil, updateTime: Date) { self.appArn = appArn self.appId = appId self.autoBranchCreationConfig = autoBranchCreationConfig self.autoBranchCreationPatterns = autoBranchCreationPatterns self.basicAuthCredentials = basicAuthCredentials self.buildSpec = buildSpec + self.cacheConfig = cacheConfig self.createTime = createTime self.customHeaders = customHeaders self.customRules = customRules @@ -185,6 +194,7 @@ extension Amplify { case autoBranchCreationPatterns = "autoBranchCreationPatterns" case basicAuthCredentials = "basicAuthCredentials" case buildSpec = "buildSpec" + case cacheConfig = "cacheConfig" case createTime = "createTime" case customHeaders = "customHeaders" case customRules = "customRules" @@ -462,12 +472,25 @@ extension Amplify { } } + public struct CacheConfig: AWSEncodableShape & AWSDecodableShape { + /// The type of cache configuration to use for an Amplify app. The AMPLIFY_MANAGED cache configuration automatically applies an optimized cache configuration for your app based on its platform, routing rules, and rewrite rules. This is the default setting. The AMPLIFY_MANAGED_NO_COOKIES cache configuration type is the same as AMPLIFY_MANAGED, except that it excludes all cookies from the cache key. 
+ public let type: CacheConfigType + + public init(type: CacheConfigType) { + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case type = "type" + } + } + public struct Certificate: AWSDecodableShape { /// The DNS record for certificate verification. public let certificateVerificationDNSRecord: String? /// The Amazon resource name (ARN) for a custom certificate that you have already added to Certificate Manager in your Amazon Web Services account. This field is required only when the certificate type is CUSTOM. public let customCertificateArn: String? - /// The type of SSL/TLS certificate that you want to use. Specify AMPLIFY_MANAGED to use the default certificate that Amplify provisions for you. Specify CUSTOM to use your own certificate that you have already added to Certificate Manager in your Amazon Web Services account. Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see Importing certificates into Certificate Manager in the ACM User guide . + /// The type of SSL/TLS certificate that you want to use. Specify AMPLIFY_MANAGED to use the default certificate that Amplify provisions for you. Specify CUSTOM to use your own certificate that you have already added to Certificate Manager in your Amazon Web Services account. Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see Importing certificates into Certificate Manager in the ACM User guide. public let type: CertificateType public init(certificateVerificationDNSRecord: String? = nil, customCertificateArn: String? = nil, type: CertificateType) { @@ -516,6 +539,8 @@ extension Amplify { public let basicAuthCredentials: String? /// The build specification (build spec) for an Amplify app. public let buildSpec: String? + /// The cache configuration for the Amplify app. + public let cacheConfig: CacheConfig? 
/// The custom HTTP headers for an Amplify app. public let customHeaders: String? /// The custom rewrite and redirect rules for an Amplify app. @@ -545,12 +570,13 @@ extension Amplify { /// The tag for an Amplify app. public let tags: [String: String]? - public init(accessToken: String? = nil, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, customHeaders: String? = nil, customRules: [CustomRule]? = nil, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool? = nil, enableBranchAutoBuild: Bool? = nil, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String, oauthToken: String? = nil, platform: Platform? = nil, repository: String? = nil, tags: [String: String]? = nil) { + public init(accessToken: String? = nil, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, cacheConfig: CacheConfig? = nil, customHeaders: String? = nil, customRules: [CustomRule]? = nil, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool? = nil, enableBranchAutoBuild: Bool? = nil, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String, oauthToken: String? = nil, platform: Platform? = nil, repository: String? = nil, tags: [String: String]? 
= nil) { self.accessToken = accessToken self.autoBranchCreationConfig = autoBranchCreationConfig self.autoBranchCreationPatterns = autoBranchCreationPatterns self.basicAuthCredentials = basicAuthCredentials self.buildSpec = buildSpec + self.cacheConfig = cacheConfig self.customHeaders = customHeaders self.customRules = customRules self.description = description @@ -620,6 +646,7 @@ extension Amplify { case autoBranchCreationPatterns = "autoBranchCreationPatterns" case basicAuthCredentials = "basicAuthCredentials" case buildSpec = "buildSpec" + case cacheConfig = "cacheConfig" case customHeaders = "customHeaders" case customRules = "customRules" case description = "description" @@ -2627,6 +2654,8 @@ extension Amplify { public let basicAuthCredentials: String? /// The build specification (build spec) for an Amplify app. public let buildSpec: String? + /// The cache configuration for the Amplify app. + public let cacheConfig: CacheConfig? /// The custom HTTP headers for an Amplify app. public let customHeaders: String? /// The custom redirect and rewrite rules for an Amplify app. @@ -2654,13 +2683,14 @@ extension Amplify { /// The name of the Git repository for an Amplify app. public let repository: String? - public init(accessToken: String? = nil, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, customHeaders: String? = nil, customRules: [CustomRule]? = nil, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool? = nil, enableBranchAutoBuild: Bool? = nil, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String? = nil, oauthToken: String? = nil, platform: Platform? = nil, repository: String? = nil) { + public init(accessToken: String? = nil, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? 
= nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, cacheConfig: CacheConfig? = nil, customHeaders: String? = nil, customRules: [CustomRule]? = nil, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool? = nil, enableBranchAutoBuild: Bool? = nil, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String? = nil, oauthToken: String? = nil, platform: Platform? = nil, repository: String? = nil) { self.accessToken = accessToken self.appId = appId self.autoBranchCreationConfig = autoBranchCreationConfig self.autoBranchCreationPatterns = autoBranchCreationPatterns self.basicAuthCredentials = basicAuthCredentials self.buildSpec = buildSpec + self.cacheConfig = cacheConfig self.customHeaders = customHeaders self.customRules = customRules self.description = description @@ -2685,6 +2715,7 @@ extension Amplify { try container.encodeIfPresent(self.autoBranchCreationPatterns, forKey: .autoBranchCreationPatterns) try container.encodeIfPresent(self.basicAuthCredentials, forKey: .basicAuthCredentials) try container.encodeIfPresent(self.buildSpec, forKey: .buildSpec) + try container.encodeIfPresent(self.cacheConfig, forKey: .cacheConfig) try container.encodeIfPresent(self.customHeaders, forKey: .customHeaders) try container.encodeIfPresent(self.customRules, forKey: .customRules) try container.encodeIfPresent(self.description, forKey: .description) @@ -2748,6 +2779,7 @@ extension Amplify { case autoBranchCreationPatterns = "autoBranchCreationPatterns" case basicAuthCredentials = "basicAuthCredentials" case buildSpec = "buildSpec" + case cacheConfig = "cacheConfig" case customHeaders = "customHeaders" case customRules = "customRules" case description = "description" diff --git a/Sources/Soto/Services/AppIntegrations/AppIntegrations_api.swift b/Sources/Soto/Services/AppIntegrations/AppIntegrations_api.swift index 
67d3c42e31..edf70456ee 100644 --- a/Sources/Soto/Services/AppIntegrations/AppIntegrations_api.swift +++ b/Sources/Soto/Services/AppIntegrations/AppIntegrations_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS AppIntegrations service. /// -/// The Amazon AppIntegrations service enables you to configure and reuse connections to external applications. For information about how you can use external applications with Amazon Connect, see Set up pre-built integrations and Deliver information to agents using Amazon Connect Wisdom in the Amazon Connect Administrator Guide. +/// Amazon AppIntegrations actions Amazon AppIntegrations data types The Amazon AppIntegrations service enables you to configure and reuse connections to external applications. For information about how you can use external applications with Amazon Connect, see the following topics in the Amazon Connect Administrator Guide: Third-party applications (3p apps) in the agent workspace Use Amazon Q in Connect for generative AI–powered agent assistance in real-time public struct AppIntegrations: AWSService { // MARK: Member variables @@ -73,7 +73,7 @@ public struct AppIntegrations: AWSService { // MARK: API Calls - /// This API is in preview release and subject to change. Creates and persists an Application resource. + /// Creates and persists an Application resource. @Sendable public func createApplication(_ input: CreateApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateApplicationResponse { return try await self.client.execute( @@ -99,6 +99,19 @@ public struct AppIntegrations: AWSService { ) } + /// Creates and persists a DataIntegrationAssociation resource. 
+ @Sendable + public func createDataIntegrationAssociation(_ input: CreateDataIntegrationAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataIntegrationAssociationResponse { + return try await self.client.execute( + operation: "CreateDataIntegrationAssociation", + path: "/dataIntegrations/{DataIntegrationIdentifier}/associations", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates an EventIntegration, given a specified name, description, and a reference to an Amazon EventBridge bus in your account and a partner event source that pushes events to that bus. No objects are created in the your account, only metadata that is persisted on the EventIntegration control plane. @Sendable public func createEventIntegration(_ input: CreateEventIntegrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEventIntegrationResponse { @@ -153,7 +166,7 @@ public struct AppIntegrations: AWSService { ) } - /// This API is in preview release and subject to change. Get an Application resource. + /// Get an Application resource. @Sendable public func getApplication(_ input: GetApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetApplicationResponse { return try await self.client.execute( @@ -207,7 +220,7 @@ public struct AppIntegrations: AWSService { ) } - /// This API is in preview release and subject to change. Lists applications in the account. + /// Lists applications in the account. @Sendable public func listApplications(_ input: ListApplicationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListApplicationsResponse { return try await self.client.execute( @@ -315,7 +328,7 @@ public struct AppIntegrations: AWSService { ) } - /// This API is in preview release and subject to change. Updates and persists an Application resource. + /// Updates and persists an Application resource. 
@Sendable public func updateApplication(_ input: UpdateApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateApplicationResponse { return try await self.client.execute( @@ -343,6 +356,19 @@ public struct AppIntegrations: AWSService { ) } + /// Updates and persists a DataIntegrationAssociation resource. Updating a DataIntegrationAssociation with ExecutionConfiguration will rerun the on-demand job. + @Sendable + public func updateDataIntegrationAssociation(_ input: UpdateDataIntegrationAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateDataIntegrationAssociationResponse { + return try await self.client.execute( + operation: "UpdateDataIntegrationAssociation", + path: "/dataIntegrations/{DataIntegrationIdentifier}/associations/{DataIntegrationAssociationIdentifier}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates the description of an event integration. @Sendable public func updateEventIntegration(_ input: UpdateEventIntegrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEventIntegrationResponse { @@ -389,7 +415,7 @@ extension AppIntegrations { ) } - /// This API is in preview release and subject to change. Lists applications in the account. + /// Lists applications in the account. /// Return PaginatorSequence for operation. 
/// /// - Parameters: diff --git a/Sources/Soto/Services/AppIntegrations/AppIntegrations_shapes.swift b/Sources/Soto/Services/AppIntegrations/AppIntegrations_shapes.swift index f2944e9d97..49eac486c3 100644 --- a/Sources/Soto/Services/AppIntegrations/AppIntegrations_shapes.swift +++ b/Sources/Soto/Services/AppIntegrations/AppIntegrations_shapes.swift @@ -26,6 +26,19 @@ import Foundation extension AppIntegrations { // MARK: Enums + public enum ExecutionMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case onDemand = "ON_DEMAND" + case scheduled = "SCHEDULED" + public var description: String { return self.rawValue } + } + + public enum ExecutionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case completed = "COMPLETED" + case failed = "FAILED" + case inProgress = "IN_PROGRESS" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct ApplicationAssociationSummary: AWSDecodableShape { @@ -211,6 +224,99 @@ extension AppIntegrations { } } + public struct CreateDataIntegrationAssociationRequest: AWSEncodableShape { + /// The mapping of metadata to be extracted from the data. + public let clientAssociationMetadata: [String: String]? + /// The identifier for the client that is associated with the DataIntegration association. + public let clientId: String? + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + public let clientToken: String? + /// A unique identifier for the DataIntegration. + public let dataIntegrationIdentifier: String + /// The URI of the data destination. + public let destinationURI: String? + /// The configuration for how the files should be pulled from the source. + public let executionConfiguration: ExecutionConfiguration? 
+ public let objectConfiguration: [String: [String: [String]]]? + + public init(clientAssociationMetadata: [String: String]? = nil, clientId: String? = nil, clientToken: String? = CreateDataIntegrationAssociationRequest.idempotencyToken(), dataIntegrationIdentifier: String, destinationURI: String? = nil, executionConfiguration: ExecutionConfiguration? = nil, objectConfiguration: [String: [String: [String]]]? = nil) { + self.clientAssociationMetadata = clientAssociationMetadata + self.clientId = clientId + self.clientToken = clientToken + self.dataIntegrationIdentifier = dataIntegrationIdentifier + self.destinationURI = destinationURI + self.executionConfiguration = executionConfiguration + self.objectConfiguration = objectConfiguration + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientAssociationMetadata, forKey: .clientAssociationMetadata) + try container.encodeIfPresent(self.clientId, forKey: .clientId) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + request.encodePath(self.dataIntegrationIdentifier, key: "DataIntegrationIdentifier") + try container.encodeIfPresent(self.destinationURI, forKey: .destinationURI) + try container.encodeIfPresent(self.executionConfiguration, forKey: .executionConfiguration) + try container.encodeIfPresent(self.objectConfiguration, forKey: .objectConfiguration) + } + + public func validate(name: String) throws { + try self.clientAssociationMetadata?.forEach { + try validate($0.key, name: "clientAssociationMetadata.key", parent: name, max: 255) + try validate($0.key, name: "clientAssociationMetadata.key", parent: name, min: 1) + try validate($0.key, name: "clientAssociationMetadata.key", parent: name, pattern: "\\S") + try validate($0.value, name: "clientAssociationMetadata[\"\($0.key)\"]", parent: name, max: 255) + try 
validate($0.value, name: "clientAssociationMetadata[\"\($0.key)\"]", parent: name, min: 1) + try validate($0.value, name: "clientAssociationMetadata[\"\($0.key)\"]", parent: name, pattern: "\\S") + } + try self.validate(self.clientId, name: "clientId", parent: name, max: 255) + try self.validate(self.clientId, name: "clientId", parent: name, min: 1) + try self.validate(self.clientId, name: "clientId", parent: name, pattern: ".*") + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 2048) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: ".*") + try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, max: 255) + try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, min: 1) + try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, pattern: "\\S") + try self.validate(self.destinationURI, name: "destinationURI", parent: name, max: 1000) + try self.validate(self.destinationURI, name: "destinationURI", parent: name, min: 1) + try self.validate(self.destinationURI, name: "destinationURI", parent: name, pattern: "^(\\w+\\:\\/\\/[\\w.-]+[\\w/!@#+=.-]+$)|(\\w+\\:\\/\\/[\\w.-]+[\\w/!@#+=.-]+[\\w/!@#+=.-]+[\\w/!@#+=.,-]+$)$") + try self.executionConfiguration?.validate(name: "\(name).executionConfiguration") + try self.objectConfiguration?.forEach { + try validate($0.key, name: "objectConfiguration.key", parent: name, max: 255) + try validate($0.key, name: "objectConfiguration.key", parent: name, min: 1) + try validate($0.key, name: "objectConfiguration.key", parent: name, pattern: "\\S") + } + } + + private enum CodingKeys: String, CodingKey { + case clientAssociationMetadata = "ClientAssociationMetadata" + case clientId = "ClientId" + case clientToken = "ClientToken" + case destinationURI = "DestinationURI" + 
case executionConfiguration = "ExecutionConfiguration" + case objectConfiguration = "ObjectConfiguration" + } + } + + public struct CreateDataIntegrationAssociationResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) for the DataIntegration. + public let dataIntegrationArn: String? + /// A unique identifier. for the DataIntegrationAssociation. + public let dataIntegrationAssociationId: String? + + public init(dataIntegrationArn: String? = nil, dataIntegrationAssociationId: String? = nil) { + self.dataIntegrationArn = dataIntegrationArn + self.dataIntegrationAssociationId = dataIntegrationAssociationId + } + + private enum CodingKeys: String, CodingKey { + case dataIntegrationArn = "DataIntegrationArn" + case dataIntegrationAssociationId = "DataIntegrationAssociationId" + } + } + public struct CreateDataIntegrationRequest: AWSEncodableShape { /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. public let clientToken: String? @@ -218,7 +324,7 @@ extension AppIntegrations { public let description: String? /// The configuration for what files should be pulled from the source. public let fileConfiguration: FileConfiguration? - /// The KMS key for the DataIntegration. + /// The KMS key ARN for the DataIntegration. public let kmsKey: String /// The name of the DataIntegration. public let name: String @@ -227,11 +333,11 @@ extension AppIntegrations { /// The name of the data and how often it should be pulled from the source. public let scheduleConfig: ScheduleConfiguration? /// The URI of the data source. - public let sourceURI: String + public let sourceURI: String? /// The tags used to organize, track, or control access for this resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }. public let tags: [String: String]? 
- public init(clientToken: String? = CreateDataIntegrationRequest.idempotencyToken(), description: String? = nil, fileConfiguration: FileConfiguration? = nil, kmsKey: String, name: String, objectConfiguration: [String: [String: [String]]]? = nil, scheduleConfig: ScheduleConfiguration? = nil, sourceURI: String, tags: [String: String]? = nil) { + public init(clientToken: String? = CreateDataIntegrationRequest.idempotencyToken(), description: String? = nil, fileConfiguration: FileConfiguration? = nil, kmsKey: String, name: String, objectConfiguration: [String: [String: [String]]]? = nil, scheduleConfig: ScheduleConfiguration? = nil, sourceURI: String? = nil, tags: [String: String]? = nil) { self.clientToken = clientToken self.description = description self.fileConfiguration = fileConfiguration @@ -299,7 +405,7 @@ extension AppIntegrations { public let fileConfiguration: FileConfiguration? /// A unique identifier. public let id: String? - /// The KMS key for the DataIntegration. + /// The KMS key ARN for the DataIntegration. public let kmsKey: String? /// The name of the DataIntegration. public let name: String? @@ -417,17 +523,28 @@ extension AppIntegrations { public let dataIntegrationArn: String? /// The Amazon Resource Name (ARN) of the DataIntegration association. public let dataIntegrationAssociationArn: String? + /// The URI of the data destination. + public let destinationURI: String? + public let executionConfiguration: ExecutionConfiguration? + /// The execution status of the last job. + public let lastExecutionStatus: LastExecutionStatus? - public init(clientId: String? = nil, dataIntegrationArn: String? = nil, dataIntegrationAssociationArn: String? = nil) { + public init(clientId: String? = nil, dataIntegrationArn: String? = nil, dataIntegrationAssociationArn: String? = nil, destinationURI: String? = nil, executionConfiguration: ExecutionConfiguration? = nil, lastExecutionStatus: LastExecutionStatus? 
= nil) { self.clientId = clientId self.dataIntegrationArn = dataIntegrationArn self.dataIntegrationAssociationArn = dataIntegrationAssociationArn + self.destinationURI = destinationURI + self.executionConfiguration = executionConfiguration + self.lastExecutionStatus = lastExecutionStatus } private enum CodingKeys: String, CodingKey { case clientId = "ClientId" case dataIntegrationArn = "DataIntegrationArn" case dataIntegrationAssociationArn = "DataIntegrationAssociationArn" + case destinationURI = "DestinationURI" + case executionConfiguration = "ExecutionConfiguration" + case lastExecutionStatus = "LastExecutionStatus" } } @@ -618,6 +735,30 @@ extension AppIntegrations { } } + public struct ExecutionConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The mode for data import/export execution. + public let executionMode: ExecutionMode + public let onDemandConfiguration: OnDemandConfiguration? + public let scheduleConfiguration: ScheduleConfiguration? + + public init(executionMode: ExecutionMode, onDemandConfiguration: OnDemandConfiguration? = nil, scheduleConfiguration: ScheduleConfiguration? = nil) { + self.executionMode = executionMode + self.onDemandConfiguration = onDemandConfiguration + self.scheduleConfiguration = scheduleConfiguration + } + + public func validate(name: String) throws { + try self.onDemandConfiguration?.validate(name: "\(name).onDemandConfiguration") + try self.scheduleConfiguration?.validate(name: "\(name).scheduleConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case executionMode = "ExecutionMode" + case onDemandConfiguration = "OnDemandConfiguration" + case scheduleConfiguration = "ScheduleConfiguration" + } + } + public struct ExternalUrlConfig: AWSEncodableShape & AWSDecodableShape { /// The URL to access the application. 
public let accessUrl: String @@ -804,13 +945,13 @@ extension AppIntegrations { public struct GetDataIntegrationResponse: AWSDecodableShape { /// The Amazon Resource Name (ARN) for the DataIntegration. public let arn: String? - /// The KMS key for the DataIntegration. + /// A description of the DataIntegration. public let description: String? /// The configuration for what files should be pulled from the source. public let fileConfiguration: FileConfiguration? /// A unique identifier. public let id: String? - /// The KMS key for the DataIntegration. + /// The KMS key ARN for the DataIntegration. public let kmsKey: String? /// The name of the DataIntegration. public let name: String? @@ -906,6 +1047,23 @@ extension AppIntegrations { } } + public struct LastExecutionStatus: AWSDecodableShape { + /// The job status enum string. + public let executionStatus: ExecutionStatus? + /// The status message of a job. + public let statusMessage: String? + + public init(executionStatus: ExecutionStatus? = nil, statusMessage: String? = nil) { + self.executionStatus = executionStatus + self.statusMessage = statusMessage + } + + private enum CodingKeys: String, CodingKey { + case executionStatus = "ExecutionStatus" + case statusMessage = "StatusMessage" + } + } + public struct ListApplicationAssociationsRequest: AWSEncodableShape { /// A unique identifier for the Application. public let applicationId: String @@ -1245,6 +1403,32 @@ extension AppIntegrations { } } + public struct OnDemandConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The end time for data pull from the source as a Unix/epoch string in milliseconds + public let endTime: String? + /// The start time for data pull from the source as a Unix/epoch string in milliseconds + public let startTime: String + + public init(endTime: String?
= nil, startTime: String) { + self.endTime = endTime + self.startTime = startTime + } + + public func validate(name: String) throws { + try self.validate(self.endTime, name: "endTime", parent: name, max: 255) + try self.validate(self.endTime, name: "endTime", parent: name, min: 1) + try self.validate(self.endTime, name: "endTime", parent: name, pattern: "\\S") + try self.validate(self.startTime, name: "startTime", parent: name, max: 255) + try self.validate(self.startTime, name: "startTime", parent: name, min: 1) + try self.validate(self.startTime, name: "startTime", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case endTime = "EndTime" + case startTime = "StartTime" + } + } + public struct Publication: AWSEncodableShape & AWSDecodableShape { /// The description of the publication. public let description: String? @@ -1503,6 +1687,47 @@ extension AppIntegrations { public init() {} } + public struct UpdateDataIntegrationAssociationRequest: AWSEncodableShape { + /// A unique identifier of the DataIntegrationAssociation resource. + public let dataIntegrationAssociationIdentifier: String + /// A unique identifier for the DataIntegration. + public let dataIntegrationIdentifier: String + /// The configuration for how the files should be pulled from the source. + public let executionConfiguration: ExecutionConfiguration + + public init(dataIntegrationAssociationIdentifier: String, dataIntegrationIdentifier: String, executionConfiguration: ExecutionConfiguration) { + self.dataIntegrationAssociationIdentifier = dataIntegrationAssociationIdentifier + self.dataIntegrationIdentifier = dataIntegrationIdentifier + self.executionConfiguration = executionConfiguration + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as!
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.dataIntegrationAssociationIdentifier, key: "DataIntegrationAssociationIdentifier") + request.encodePath(self.dataIntegrationIdentifier, key: "DataIntegrationIdentifier") + try container.encode(self.executionConfiguration, forKey: .executionConfiguration) + } + + public func validate(name: String) throws { + try self.validate(self.dataIntegrationAssociationIdentifier, name: "dataIntegrationAssociationIdentifier", parent: name, max: 255) + try self.validate(self.dataIntegrationAssociationIdentifier, name: "dataIntegrationAssociationIdentifier", parent: name, min: 1) + try self.validate(self.dataIntegrationAssociationIdentifier, name: "dataIntegrationAssociationIdentifier", parent: name, pattern: "\\S") + try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, max: 255) + try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, min: 1) + try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, pattern: "\\S") + try self.executionConfiguration.validate(name: "\(name).executionConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case executionConfiguration = "ExecutionConfiguration" + } + } + + public struct UpdateDataIntegrationAssociationResponse: AWSDecodableShape { + public init() {} + } + public struct UpdateDataIntegrationRequest: AWSEncodableShape { /// A description of the DataIntegration. public let description: String? 
diff --git a/Sources/Soto/Services/AppStream/AppStream_api.swift b/Sources/Soto/Services/AppStream/AppStream_api.swift index 0ea8c3a1e0..fd10d03d33 100644 --- a/Sources/Soto/Services/AppStream/AppStream_api.swift +++ b/Sources/Soto/Services/AppStream/AppStream_api.swift @@ -319,6 +319,19 @@ public struct AppStream: AWSService { ) } + /// Creates custom branding that customizes the appearance of the streaming application catalog page. + @Sendable + public func createThemeForStack(_ input: CreateThemeForStackRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateThemeForStackResult { + return try await self.client.execute( + operation: "CreateThemeForStack", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a new image with the latest Windows operating system updates, driver updates, and AppStream 2.0 agent software. For more information, see the "Update an Image by Using Managed AppStream 2.0 Image Updates" section in Administer Your AppStream 2.0 Images, in the Amazon AppStream 2.0 Administration Guide. @Sendable public func createUpdatedImage(_ input: CreateUpdatedImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateUpdatedImageResult { @@ -488,6 +501,19 @@ public struct AppStream: AWSService { ) } + /// Deletes custom branding that customizes the appearance of the streaming application catalog page. + @Sendable + public func deleteThemeForStack(_ input: DeleteThemeForStackRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteThemeForStackResult { + return try await self.client.execute( + operation: "DeleteThemeForStack", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Disables usage report generation. 
@Sendable public func deleteUsageReportSubscription(_ input: DeleteUsageReportSubscriptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteUsageReportSubscriptionResult { @@ -683,6 +709,19 @@ public struct AppStream: AWSService { ) } + /// Retrieves a list that describes the theme for a specified stack. A theme is custom branding that customizes the appearance of the streaming application catalog page. + @Sendable + public func describeThemeForStack(_ input: DescribeThemeForStackRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeThemeForStackResult { + return try await self.client.execute( + operation: "DescribeThemeForStack", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves a list that describes one or more usage report subscriptions. @Sendable public func describeUsageReportSubscriptions(_ input: DescribeUsageReportSubscriptionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeUsageReportSubscriptionsResult { @@ -1059,6 +1098,19 @@ public struct AppStream: AWSService { logger: logger ) } + + /// Updates custom branding that customizes the appearance of the streaming application catalog page. 
+ @Sendable + public func updateThemeForStack(_ input: UpdateThemeForStackRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateThemeForStackResult { + return try await self.client.execute( + operation: "UpdateThemeForStack", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } } extension AppStream { diff --git a/Sources/Soto/Services/AppStream/AppStream_shapes.swift b/Sources/Soto/Services/AppStream/AppStream_shapes.swift index f4616eb829..92bf2460ed 100644 --- a/Sources/Soto/Services/AppStream/AppStream_shapes.swift +++ b/Sources/Soto/Services/AppStream/AppStream_shapes.swift @@ -100,6 +100,12 @@ extension AppStream { public var description: String { return self.rawValue } } + public enum DynamicAppProvidersEnabled: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum FleetAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case domainJoinInfo = "DOMAIN_JOIN_INFO" case iamRoleArn = "IAM_ROLE_ARN" @@ -181,6 +187,12 @@ extension AppStream { public var description: String { return self.rawValue } } + public enum ImageSharedWithOthers: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `false` = "FALSE" + case `true` = "TRUE" + public var description: String { return self.rawValue } + } + public enum ImageState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case available = "AVAILABLE" case copying = "COPYING" @@ -199,6 +211,12 @@ extension AppStream { public var description: String { return self.rawValue } } + public enum LatestAppstreamAgentVersion: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `false` = "FALSE" + case `true` = "TRUE" + public var description: String { return self.rawValue } + } + public 
enum MessageAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case resend = "RESEND" case suppress = "SUPPRESS" @@ -219,6 +237,7 @@ extension AppStream { public enum PlatformType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case amazonLinux2 = "AMAZON_LINUX2" + case rhel8 = "RHEL8" case windows = "WINDOWS" case windowsServer2016 = "WINDOWS_SERVER_2016" case windowsServer2019 = "WINDOWS_SERVER_2019" @@ -280,6 +299,25 @@ extension AppStream { public var description: String { return self.rawValue } } + public enum ThemeAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case footerLinks = "FOOTER_LINKS" + public var description: String { return self.rawValue } + } + + public enum ThemeState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + + public enum ThemeStyling: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case blue = "BLUE" + case lightBlue = "LIGHT_BLUE" + case pink = "PINK" + case red = "RED" + public var description: String { return self.rawValue } + } + public enum UsageReportExecutionErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case accessDenied = "ACCESS_DENIED" case internalServiceError = "INTERNAL_SERVICE_ERROR" @@ -1316,7 +1354,7 @@ extension AppStream { public let computeCapacity: ComputeCapacity? /// The description to display. public let description: String? - /// The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. 
Specify a value between 60 and 360000. + /// The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 36000. public let disconnectTimeoutInSeconds: Int? /// The fleet name to display. public let displayName: String? @@ -1328,13 +1366,13 @@ extension AppStream { public let fleetType: FleetType? /// The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To assume a role, a fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and creates the appstream_machine_role credential profile on the instance. For more information, see Using an IAM Role to Grant Permissions to Applications and Scripts Running on AppStream 2.0 Streaming Instances in the Amazon AppStream 2.0 Administration Guide. public let iamRoleArn: String? - /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. 
To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. + /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. 
For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. public let idleDisconnectTimeoutInSeconds: Int? /// The ARN of the public, private, or shared image to use. public let imageArn: String? /// The name of the image used to create the fleet. public let imageName: String? - /// The instance type to use when launching fleet instances. The following instance types are available: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge stream.compute.large stream.compute.xlarge stream.compute.2xlarge stream.compute.4xlarge stream.compute.8xlarge stream.memory.large stream.memory.xlarge stream.memory.2xlarge stream.memory.4xlarge stream.memory.8xlarge stream.memory.z1d.large stream.memory.z1d.xlarge stream.memory.z1d.2xlarge stream.memory.z1d.3xlarge stream.memory.z1d.6xlarge stream.memory.z1d.12xlarge stream.graphics-design.large stream.graphics-design.xlarge stream.graphics-design.2xlarge stream.graphics-design.4xlarge stream.graphics-desktop.2xlarge stream.graphics.g4dn.xlarge stream.graphics.g4dn.2xlarge stream.graphics.g4dn.4xlarge stream.graphics.g4dn.8xlarge stream.graphics.g4dn.12xlarge stream.graphics.g4dn.16xlarge stream.graphics-pro.4xlarge stream.graphics-pro.8xlarge stream.graphics-pro.16xlarge The following instance types are available for Elastic fleets: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge + /// The instance type to use when launching fleet instances. 
The following instance types are available: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge stream.compute.large stream.compute.xlarge stream.compute.2xlarge stream.compute.4xlarge stream.compute.8xlarge stream.memory.large stream.memory.xlarge stream.memory.2xlarge stream.memory.4xlarge stream.memory.8xlarge stream.memory.z1d.large stream.memory.z1d.xlarge stream.memory.z1d.2xlarge stream.memory.z1d.3xlarge stream.memory.z1d.6xlarge stream.memory.z1d.12xlarge stream.graphics-design.large stream.graphics-design.xlarge stream.graphics-design.2xlarge stream.graphics-design.4xlarge stream.graphics-desktop.2xlarge stream.graphics.g4dn.xlarge stream.graphics.g4dn.2xlarge stream.graphics.g4dn.4xlarge stream.graphics.g4dn.8xlarge stream.graphics.g4dn.12xlarge stream.graphics.g4dn.16xlarge stream.graphics.g5.xlarge stream.graphics.g5.2xlarge stream.graphics.g5.4xlarge stream.graphics.g5.8xlarge stream.graphics.g5.12xlarge stream.graphics.g5.16xlarge stream.graphics.g5.24xlarge stream.graphics-pro.4xlarge stream.graphics-pro.8xlarge stream.graphics-pro.16xlarge The following instance types are available for Elastic fleets: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge public let instanceType: String? /// The maximum concurrent sessions of the Elastic fleet. This is required for Elastic fleets, and not allowed for other fleet types. public let maxConcurrentSessions: Int? @@ -1750,6 +1788,64 @@ extension AppStream { } } + public struct CreateThemeForStackRequest: AWSEncodableShape { + /// The S3 location of the favicon. The favicon enables users to recognize their application streaming site in a browser full of tabs or bookmarks. It is displayed at the top of the browser tab for the application streaming site during users' streaming sessions. + public let faviconS3Location: S3Location? 
+ /// The links that are displayed in the footer of the streaming application catalog page. These links are helpful resources for users, such as the organization's IT support and product marketing sites. + public let footerLinks: [ThemeFooterLink]? + /// The organization logo that appears on the streaming application catalog page. + public let organizationLogoS3Location: S3Location? + /// The name of the stack for the theme. + public let stackName: String? + /// The color theme that is applied to website links, text, and buttons. These colors are also applied as accents in the background for the streaming application catalog page. + public let themeStyling: ThemeStyling? + /// The title that is displayed at the top of the browser tab during users' application streaming sessions. + public let titleText: String? + + public init(faviconS3Location: S3Location? = nil, footerLinks: [ThemeFooterLink]? = nil, organizationLogoS3Location: S3Location? = nil, stackName: String? = nil, themeStyling: ThemeStyling? = nil, titleText: String? 
= nil) { + self.faviconS3Location = faviconS3Location + self.footerLinks = footerLinks + self.organizationLogoS3Location = organizationLogoS3Location + self.stackName = stackName + self.themeStyling = themeStyling + self.titleText = titleText + } + + public func validate(name: String) throws { + try self.faviconS3Location?.validate(name: "\(name).faviconS3Location") + try self.footerLinks?.forEach { + try $0.validate(name: "\(name).footerLinks[]") + } + try self.organizationLogoS3Location?.validate(name: "\(name).organizationLogoS3Location") + try self.validate(self.stackName, name: "stackName", parent: name, pattern: "^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,100}$") + try self.validate(self.titleText, name: "titleText", parent: name, max: 300) + try self.validate(self.titleText, name: "titleText", parent: name, min: 1) + try self.validate(self.titleText, name: "titleText", parent: name, pattern: "^[-@./#&+\\w\\s]*$") + } + + private enum CodingKeys: String, CodingKey { + case faviconS3Location = "FaviconS3Location" + case footerLinks = "FooterLinks" + case organizationLogoS3Location = "OrganizationLogoS3Location" + case stackName = "StackName" + case themeStyling = "ThemeStyling" + case titleText = "TitleText" + } + } + + public struct CreateThemeForStackResult: AWSDecodableShape { + /// The theme object that contains the metadata of the custom branding. + public let theme: Theme? + + public init(theme: Theme? = nil) { + self.theme = theme + } + + private enum CodingKeys: String, CodingKey { + case theme = "Theme" + } + } + public struct CreateUpdatedImageRequest: AWSEncodableShape { /// Indicates whether to display the status of image update availability before AppStream 2.0 initiates the process of creating a new updated image. If this value is set to true, AppStream 2.0 displays whether image updates are available. If this value is set to false, AppStream 2.0 initiates the process of creating a new updated image without displaying whether image updates are available. 
public let dryRun: Bool? @@ -2113,6 +2209,27 @@ extension AppStream { public init() {} } + public struct DeleteThemeForStackRequest: AWSEncodableShape { + /// The name of the stack for the theme. + public let stackName: String? + + public init(stackName: String? = nil) { + self.stackName = stackName + } + + public func validate(name: String) throws { + try self.validate(self.stackName, name: "stackName", parent: name, pattern: "^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,100}$") + } + + private enum CodingKeys: String, CodingKey { + case stackName = "StackName" + } + } + + public struct DeleteThemeForStackResult: AWSDecodableShape { + public init() {} + } + public struct DeleteUsageReportSubscriptionRequest: AWSEncodableShape { public init() {} } @@ -2775,6 +2892,36 @@ extension AppStream { } } + public struct DescribeThemeForStackRequest: AWSEncodableShape { + /// The name of the stack for the theme. + public let stackName: String? + + public init(stackName: String? = nil) { + self.stackName = stackName + } + + public func validate(name: String) throws { + try self.validate(self.stackName, name: "stackName", parent: name, pattern: "^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,100}$") + } + + private enum CodingKeys: String, CodingKey { + case stackName = "StackName" + } + } + + public struct DescribeThemeForStackResult: AWSDecodableShape { + /// The theme object that contains the metadata of the custom branding. + public let theme: Theme? + + public init(theme: Theme? = nil) { + self.theme = theme + } + + private enum CodingKeys: String, CodingKey { + case theme = "Theme" + } + } + public struct DescribeUsageReportSubscriptionsRequest: AWSEncodableShape { /// The maximum size of each page of results. public let maxResults: Int? @@ -3243,7 +3390,7 @@ extension AppStream { public let createdTime: Date? /// The description to display. public let description: String? - /// The amount of time that a streaming session remains active after users disconnect. 
If they try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 360000. + /// The amount of time that a streaming session remains active after users disconnect. If they try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 36000. public let disconnectTimeoutInSeconds: Int? /// The fleet name to display. public let displayName: String? @@ -3257,7 +3404,7 @@ extension AppStream { public let fleetType: FleetType? /// The ARN of the IAM role that is applied to the fleet. To assume a role, the fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and creates the appstream_machine_role credential profile on the instance. For more information, see Using an IAM Role to Grant Permissions to Applications and Scripts Running on AppStream 2.0 Streaming Instances in the Amazon AppStream 2.0 Administration Guide. public let iamRoleArn: String? - /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. 
File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. + /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). 
If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. public let idleDisconnectTimeoutInSeconds: Int? /// The ARN for the public, private, or shared image. public let imageArn: String? @@ -3375,6 +3522,8 @@ extension AppStream { public let description: String? /// The image name to display. public let displayName: String? + /// Indicates whether dynamic app providers are enabled within an AppStream 2.0 image or not. + public let dynamicAppProvidersEnabled: DynamicAppProvidersEnabled? /// The name of the image builder that was used to create the private image. If the image is shared, this value is null. public let imageBuilderName: String? /// Indicates whether an image builder can be launched from this image. @@ -3383,6 +3532,10 @@ extension AppStream { public let imageErrors: [ResourceError]? /// The permissions to provide to the destination AWS account for the specified image. public let imagePermissions: ImagePermissions? + /// Indicates whether the image is shared with another account ID. + public let imageSharedWithOthers: ImageSharedWithOthers? + /// Indicates whether the image is using the latest AppStream 2.0 agent version or not. + public let latestAppstreamAgentVersion: LatestAppstreamAgentVersion? /// The name of the image. public let name: String? /// The operating system platform of the image. @@ -3393,10 +3546,12 @@ extension AppStream { public let state: ImageState? /// The reason why the last state change occurred. public let stateChangeReason: ImageStateChangeReason? + /// The supported instances families that determine which image a customer can use when the customer launches a fleet or image builder. 
The following instances families are supported: General Purpose Compute Optimized Memory Optimized Graphics Graphics Design Graphics Pro Graphics G4 Graphics G5 + public let supportedInstanceFamilies: [String]? /// Indicates whether the image is public or private. public let visibility: VisibilityType? - public init(applications: [Application]? = nil, appstreamAgentVersion: String? = nil, arn: String? = nil, baseImageArn: String? = nil, createdTime: Date? = nil, description: String? = nil, displayName: String? = nil, imageBuilderName: String? = nil, imageBuilderSupported: Bool? = nil, imageErrors: [ResourceError]? = nil, imagePermissions: ImagePermissions? = nil, name: String? = nil, platform: PlatformType? = nil, publicBaseImageReleasedDate: Date? = nil, state: ImageState? = nil, stateChangeReason: ImageStateChangeReason? = nil, visibility: VisibilityType? = nil) { + public init(applications: [Application]? = nil, appstreamAgentVersion: String? = nil, arn: String? = nil, baseImageArn: String? = nil, createdTime: Date? = nil, description: String? = nil, displayName: String? = nil, dynamicAppProvidersEnabled: DynamicAppProvidersEnabled? = nil, imageBuilderName: String? = nil, imageBuilderSupported: Bool? = nil, imageErrors: [ResourceError]? = nil, imagePermissions: ImagePermissions? = nil, imageSharedWithOthers: ImageSharedWithOthers? = nil, latestAppstreamAgentVersion: LatestAppstreamAgentVersion? = nil, name: String? = nil, platform: PlatformType? = nil, publicBaseImageReleasedDate: Date? = nil, state: ImageState? = nil, stateChangeReason: ImageStateChangeReason? = nil, supportedInstanceFamilies: [String]? = nil, visibility: VisibilityType? 
= nil) { self.applications = applications self.appstreamAgentVersion = appstreamAgentVersion self.arn = arn @@ -3404,15 +3559,19 @@ extension AppStream { self.createdTime = createdTime self.description = description self.displayName = displayName + self.dynamicAppProvidersEnabled = dynamicAppProvidersEnabled self.imageBuilderName = imageBuilderName self.imageBuilderSupported = imageBuilderSupported self.imageErrors = imageErrors self.imagePermissions = imagePermissions + self.imageSharedWithOthers = imageSharedWithOthers + self.latestAppstreamAgentVersion = latestAppstreamAgentVersion self.name = name self.platform = platform self.publicBaseImageReleasedDate = publicBaseImageReleasedDate self.state = state self.stateChangeReason = stateChangeReason + self.supportedInstanceFamilies = supportedInstanceFamilies self.visibility = visibility } @@ -3424,15 +3583,19 @@ extension AppStream { case createdTime = "CreatedTime" case description = "Description" case displayName = "DisplayName" + case dynamicAppProvidersEnabled = "DynamicAppProvidersEnabled" case imageBuilderName = "ImageBuilderName" case imageBuilderSupported = "ImageBuilderSupported" case imageErrors = "ImageErrors" case imagePermissions = "ImagePermissions" + case imageSharedWithOthers = "ImageSharedWithOthers" + case latestAppstreamAgentVersion = "LatestAppstreamAgentVersion" case name = "Name" case platform = "Platform" case publicBaseImageReleasedDate = "PublicBaseImageReleasedDate" case state = "State" case stateChangeReason = "StateChangeReason" + case supportedInstanceFamilies = "SupportedInstanceFamilies" case visibility = "Visibility" } } @@ -3462,6 +3625,8 @@ extension AppStream { public let imageBuilderErrors: [ResourceError]? /// The instance type for the image builder. 
The following instance types are available: stream.standard.small stream.standard.medium stream.standard.large stream.compute.large stream.compute.xlarge stream.compute.2xlarge stream.compute.4xlarge stream.compute.8xlarge stream.memory.large stream.memory.xlarge stream.memory.2xlarge stream.memory.4xlarge stream.memory.8xlarge stream.memory.z1d.large stream.memory.z1d.xlarge stream.memory.z1d.2xlarge stream.memory.z1d.3xlarge stream.memory.z1d.6xlarge stream.memory.z1d.12xlarge stream.graphics-design.large stream.graphics-design.xlarge stream.graphics-design.2xlarge stream.graphics-design.4xlarge stream.graphics-desktop.2xlarge stream.graphics.g4dn.xlarge stream.graphics.g4dn.2xlarge stream.graphics.g4dn.4xlarge stream.graphics.g4dn.8xlarge stream.graphics.g4dn.12xlarge stream.graphics.g4dn.16xlarge stream.graphics-pro.4xlarge stream.graphics-pro.8xlarge stream.graphics-pro.16xlarge public let instanceType: String? + /// Indicates whether the image builder is using the latest AppStream 2.0 agent version or not. + public let latestAppstreamAgentVersion: LatestAppstreamAgentVersion? /// The name of the image builder. public let name: String? public let networkAccessConfiguration: NetworkAccessConfiguration? @@ -3474,7 +3639,7 @@ extension AppStream { /// The VPC configuration of the image builder. public let vpcConfig: VpcConfig? - public init(accessEndpoints: [AccessEndpoint]? = nil, appstreamAgentVersion: String? = nil, arn: String? = nil, createdTime: Date? = nil, description: String? = nil, displayName: String? = nil, domainJoinInfo: DomainJoinInfo? = nil, enableDefaultInternetAccess: Bool? = nil, iamRoleArn: String? = nil, imageArn: String? = nil, imageBuilderErrors: [ResourceError]? = nil, instanceType: String? = nil, name: String? = nil, networkAccessConfiguration: NetworkAccessConfiguration? = nil, platform: PlatformType? = nil, state: ImageBuilderState? = nil, stateChangeReason: ImageBuilderStateChangeReason? = nil, vpcConfig: VpcConfig? 
= nil) { + public init(accessEndpoints: [AccessEndpoint]? = nil, appstreamAgentVersion: String? = nil, arn: String? = nil, createdTime: Date? = nil, description: String? = nil, displayName: String? = nil, domainJoinInfo: DomainJoinInfo? = nil, enableDefaultInternetAccess: Bool? = nil, iamRoleArn: String? = nil, imageArn: String? = nil, imageBuilderErrors: [ResourceError]? = nil, instanceType: String? = nil, latestAppstreamAgentVersion: LatestAppstreamAgentVersion? = nil, name: String? = nil, networkAccessConfiguration: NetworkAccessConfiguration? = nil, platform: PlatformType? = nil, state: ImageBuilderState? = nil, stateChangeReason: ImageBuilderStateChangeReason? = nil, vpcConfig: VpcConfig? = nil) { self.accessEndpoints = accessEndpoints self.appstreamAgentVersion = appstreamAgentVersion self.arn = arn @@ -3487,6 +3652,7 @@ extension AppStream { self.imageArn = imageArn self.imageBuilderErrors = imageBuilderErrors self.instanceType = instanceType + self.latestAppstreamAgentVersion = latestAppstreamAgentVersion self.name = name self.networkAccessConfiguration = networkAccessConfiguration self.platform = platform @@ -3508,6 +3674,7 @@ extension AppStream { case imageArn = "ImageArn" case imageBuilderErrors = "ImageBuilderErrors" case instanceType = "InstanceType" + case latestAppstreamAgentVersion = "LatestAppstreamAgentVersion" case name = "Name" case networkAccessConfiguration = "NetworkAccessConfiguration" case platform = "Platform" @@ -4254,6 +4421,72 @@ extension AppStream { public init() {} } + public struct Theme: AWSDecodableShape { + /// The time the theme was created. + public let createdTime: Date? + /// The stack that has the custom branding theme. + public let stackName: String? + /// The state of the theme. + public let state: ThemeState? + /// The URL of the icon that displays at the top of a user's browser tab during streaming sessions. + public let themeFaviconURL: String? + /// The website links that display in the catalog page footer. 
+ public let themeFooterLinks: [ThemeFooterLink]? + /// The URL of the logo that displays in the catalog page header. + public let themeOrganizationLogoURL: String? + /// The color that is used for the website links, text, buttons, and catalog page background. + public let themeStyling: ThemeStyling? + /// The browser tab page title. + public let themeTitleText: String? + + public init(createdTime: Date? = nil, stackName: String? = nil, state: ThemeState? = nil, themeFaviconURL: String? = nil, themeFooterLinks: [ThemeFooterLink]? = nil, themeOrganizationLogoURL: String? = nil, themeStyling: ThemeStyling? = nil, themeTitleText: String? = nil) { + self.createdTime = createdTime + self.stackName = stackName + self.state = state + self.themeFaviconURL = themeFaviconURL + self.themeFooterLinks = themeFooterLinks + self.themeOrganizationLogoURL = themeOrganizationLogoURL + self.themeStyling = themeStyling + self.themeTitleText = themeTitleText + } + + private enum CodingKeys: String, CodingKey { + case createdTime = "CreatedTime" + case stackName = "StackName" + case state = "State" + case themeFaviconURL = "ThemeFaviconURL" + case themeFooterLinks = "ThemeFooterLinks" + case themeOrganizationLogoURL = "ThemeOrganizationLogoURL" + case themeStyling = "ThemeStyling" + case themeTitleText = "ThemeTitleText" + } + } + + public struct ThemeFooterLink: AWSEncodableShape & AWSDecodableShape { + /// The name of the websites that display in the catalog page footer. + public let displayName: String? + /// The URL of the websites that display in the catalog page footer. + public let footerLinkURL: String? + + public init(displayName: String? = nil, footerLinkURL: String? 
= nil) { + self.displayName = displayName + self.footerLinkURL = footerLinkURL + } + + public func validate(name: String) throws { + try self.validate(self.displayName, name: "displayName", parent: name, max: 300) + try self.validate(self.displayName, name: "displayName", parent: name, min: 1) + try self.validate(self.displayName, name: "displayName", parent: name, pattern: "^[-@./#&+\\w\\s]*$") + try self.validate(self.footerLinkURL, name: "footerLinkURL", parent: name, max: 1000) + try self.validate(self.footerLinkURL, name: "footerLinkURL", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case displayName = "DisplayName" + case footerLinkURL = "FooterLinkURL" + } + } + public struct UntagResourceRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the resource. public let resourceArn: String? @@ -4537,7 +4770,7 @@ extension AppStream { public let deleteVpcConfig: Bool? /// The description to display. public let description: String? - /// The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 360000. + /// The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 36000. public let disconnectTimeoutInSeconds: Int? /// The fleet name to display. public let displayName: String? @@ -4547,7 +4780,7 @@ extension AppStream { public let enableDefaultInternetAccess: Bool? 
/// The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To assume a role, a fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and creates the appstream_machine_role credential profile on the instance. For more information, see Using an IAM Role to Grant Permissions to Applications and Scripts Running on AppStream 2.0 Streaming Instances in the Amazon AppStream 2.0 Administration Guide. public let iamRoleArn: String? - /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. 
For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. + /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. public let idleDisconnectTimeoutInSeconds: Int? /// The ARN of the public, private, or shared image to use. public let imageArn: String? @@ -4826,6 +5059,72 @@ extension AppStream { } } + public struct UpdateThemeForStackRequest: AWSEncodableShape { + /// The attributes to delete. + public let attributesToDelete: [ThemeAttribute]? + /// The S3 location of the favicon. The favicon enables users to recognize their application streaming site in a browser full of tabs or bookmarks. 
It is displayed at the top of the browser tab for the application streaming site during users' streaming sessions. + public let faviconS3Location: S3Location? + /// The links that are displayed in the footer of the streaming application catalog page. These links are helpful resources for users, such as the organization's IT support and product marketing sites. + public let footerLinks: [ThemeFooterLink]? + /// The organization logo that appears on the streaming application catalog page. + public let organizationLogoS3Location: S3Location? + /// The name of the stack for the theme. + public let stackName: String? + /// Specifies whether custom branding should be applied to catalog page or not. + public let state: ThemeState? + /// The color theme that is applied to website links, text, and buttons. These colors are also applied as accents in the background for the streaming application catalog page. + public let themeStyling: ThemeStyling? + /// The title that is displayed at the top of the browser tab during users' application streaming sessions. + public let titleText: String? + + public init(attributesToDelete: [ThemeAttribute]? = nil, faviconS3Location: S3Location? = nil, footerLinks: [ThemeFooterLink]? = nil, organizationLogoS3Location: S3Location? = nil, stackName: String? = nil, state: ThemeState? = nil, themeStyling: ThemeStyling? = nil, titleText: String? 
= nil) { + self.attributesToDelete = attributesToDelete + self.faviconS3Location = faviconS3Location + self.footerLinks = footerLinks + self.organizationLogoS3Location = organizationLogoS3Location + self.stackName = stackName + self.state = state + self.themeStyling = themeStyling + self.titleText = titleText + } + + public func validate(name: String) throws { + try self.faviconS3Location?.validate(name: "\(name).faviconS3Location") + try self.footerLinks?.forEach { + try $0.validate(name: "\(name).footerLinks[]") + } + try self.organizationLogoS3Location?.validate(name: "\(name).organizationLogoS3Location") + try self.validate(self.stackName, name: "stackName", parent: name, pattern: "^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,100}$") + try self.validate(self.titleText, name: "titleText", parent: name, max: 300) + try self.validate(self.titleText, name: "titleText", parent: name, min: 1) + try self.validate(self.titleText, name: "titleText", parent: name, pattern: "^[-@./#&+\\w\\s]*$") + } + + private enum CodingKeys: String, CodingKey { + case attributesToDelete = "AttributesToDelete" + case faviconS3Location = "FaviconS3Location" + case footerLinks = "FooterLinks" + case organizationLogoS3Location = "OrganizationLogoS3Location" + case stackName = "StackName" + case state = "State" + case themeStyling = "ThemeStyling" + case titleText = "TitleText" + } + } + + public struct UpdateThemeForStackResult: AWSDecodableShape { + /// The theme object that contains the metadata of the custom branding. + public let theme: Theme? + + public init(theme: Theme? = nil) { + self.theme = theme + } + + private enum CodingKeys: String, CodingKey { + case theme = "Theme" + } + } + public struct UsageReportSubscription: AWSDecodableShape { /// The time when the last usage report was generated. public let lastGeneratedReportDate: Date? @@ -4895,7 +5194,7 @@ extension AppStream { public struct UserSetting: AWSEncodableShape & AWSDecodableShape { /// The action that is enabled or disabled. 
public let action: Action? - /// Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session. This can be specified only for the CLIPBOARD_COPY_FROM_LOCAL_DEVICE and CLIPBOARD_COPY_TO_LOCAL_DEVICE actions. This defaults to 20,971,520 (20 MB) when unspecified and the permission is ENABLED. This can't be specified when the permission is DISABLED. This can only be specified for AlwaysOn and OnDemand fleets. The attribute is not supported on Elastic fleets. The value can be between 1 and 20,971,520 (20 MB). + /// Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session. This can be specified only for the CLIPBOARD_COPY_FROM_LOCAL_DEVICE and CLIPBOARD_COPY_TO_LOCAL_DEVICE actions. This defaults to 20,971,520 (20 MB) when unspecified and the permission is ENABLED. This can't be specified when the permission is DISABLED. The value can be between 1 and 20,971,520 (20 MB). public let maximumLength: Int? /// Indicates whether the action is enabled or disabled. public let permission: Permission? diff --git a/Sources/Soto/Services/AppSync/AppSync_api.swift b/Sources/Soto/Services/AppSync/AppSync_api.swift index 514ebe3d0d..d4f9d905a8 100644 --- a/Sources/Soto/Services/AppSync/AppSync_api.swift +++ b/Sources/Soto/Services/AppSync/AppSync_api.swift @@ -915,3 +915,303 @@ extension AppSync { self.config = from.config.with(patch: patch) } } + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension AppSync { + /// Lists the API keys for a given API. API keys are deleted automatically 60 days after they expire. However, they may still be included in the response until they have actually been deleted. You can safely call DeleteApiKey to manually delete a key before it's automatically deleted. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listApiKeysPaginator( + _ input: ListApiKeysRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listApiKeys, + inputKey: \ListApiKeysRequest.nextToken, + outputKey: \ListApiKeysResponse.nextToken, + logger: logger + ) + } + + /// Lists the data sources for a given API. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listDataSourcesPaginator( + _ input: ListDataSourcesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listDataSources, + inputKey: \ListDataSourcesRequest.nextToken, + outputKey: \ListDataSourcesResponse.nextToken, + logger: logger + ) + } + + /// Lists multiple custom domain names. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listDomainNamesPaginator( + _ input: ListDomainNamesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listDomainNames, + inputKey: \ListDomainNamesRequest.nextToken, + outputKey: \ListDomainNamesResponse.nextToken, + logger: logger + ) + } + + /// List multiple functions. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listFunctionsPaginator( + _ input: ListFunctionsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listFunctions, + inputKey: \ListFunctionsRequest.nextToken, + outputKey: \ListFunctionsResponse.nextToken, + logger: logger + ) + } + + /// Lists your GraphQL APIs. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listGraphqlApisPaginator( + _ input: ListGraphqlApisRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listGraphqlApis, + inputKey: \ListGraphqlApisRequest.nextToken, + outputKey: \ListGraphqlApisResponse.nextToken, + logger: logger + ) + } + + /// Lists the resolvers for a given API and type. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listResolversPaginator( + _ input: ListResolversRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listResolvers, + inputKey: \ListResolversRequest.nextToken, + outputKey: \ListResolversResponse.nextToken, + logger: logger + ) + } + + /// List the resolvers that are associated with a specific function. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listResolversByFunctionPaginator( + _ input: ListResolversByFunctionRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listResolversByFunction, + inputKey: \ListResolversByFunctionRequest.nextToken, + outputKey: \ListResolversByFunctionResponse.nextToken, + logger: logger + ) + } + + /// Lists the SourceApiAssociationSummary data. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listSourceApiAssociationsPaginator( + _ input: ListSourceApiAssociationsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listSourceApiAssociations, + inputKey: \ListSourceApiAssociationsRequest.nextToken, + outputKey: \ListSourceApiAssociationsResponse.nextToken, + logger: logger + ) + } + + /// Lists the types for a given API. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listTypesPaginator( + _ input: ListTypesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listTypes, + inputKey: \ListTypesRequest.nextToken, + outputKey: \ListTypesResponse.nextToken, + logger: logger + ) + } + + /// Lists Type objects by the source API association ID. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listTypesByAssociationPaginator( + _ input: ListTypesByAssociationRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listTypesByAssociation, + inputKey: \ListTypesByAssociationRequest.nextToken, + outputKey: \ListTypesByAssociationResponse.nextToken, + logger: logger + ) + } +} + +extension AppSync.ListApiKeysRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppSync.ListApiKeysRequest { + return .init( + apiId: self.apiId, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension AppSync.ListDataSourcesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppSync.ListDataSourcesRequest { + return .init( + apiId: self.apiId, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension AppSync.ListDomainNamesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppSync.ListDomainNamesRequest { + return .init( + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension AppSync.ListFunctionsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppSync.ListFunctionsRequest { + return .init( + apiId: self.apiId, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension AppSync.ListGraphqlApisRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppSync.ListGraphqlApisRequest { + return .init( + apiType: self.apiType, + maxResults: self.maxResults, + nextToken: token, + owner: self.owner + ) + } +} + +extension AppSync.ListResolversByFunctionRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppSync.ListResolversByFunctionRequest { + return .init( + apiId: self.apiId, + functionId: self.functionId, + maxResults: self.maxResults, + 
nextToken: token + ) + } +} + +extension AppSync.ListResolversRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppSync.ListResolversRequest { + return .init( + apiId: self.apiId, + maxResults: self.maxResults, + nextToken: token, + typeName: self.typeName + ) + } +} + +extension AppSync.ListSourceApiAssociationsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppSync.ListSourceApiAssociationsRequest { + return .init( + apiId: self.apiId, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension AppSync.ListTypesByAssociationRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppSync.ListTypesByAssociationRequest { + return .init( + associationId: self.associationId, + format: self.format, + maxResults: self.maxResults, + mergedApiIdentifier: self.mergedApiIdentifier, + nextToken: token + ) + } +} + +extension AppSync.ListTypesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppSync.ListTypesRequest { + return .init( + apiId: self.apiId, + format: self.format, + maxResults: self.maxResults, + nextToken: token + ) + } +} diff --git a/Sources/Soto/Services/ApplicationAutoScaling/ApplicationAutoScaling_shapes.swift b/Sources/Soto/Services/ApplicationAutoScaling/ApplicationAutoScaling_shapes.swift index 2c4003e906..84bd654328 100644 --- a/Sources/Soto/Services/ApplicationAutoScaling/ApplicationAutoScaling_shapes.swift +++ b/Sources/Soto/Services/ApplicationAutoScaling/ApplicationAutoScaling_shapes.swift @@ -71,7 +71,9 @@ extension ApplicationAutoScaling { case neptuneReaderAverageCPUUtilization = "NeptuneReaderAverageCPUUtilization" case rdsReaderAverageCPUUtilization = "RDSReaderAverageCPUUtilization" case rdsReaderAverageDatabaseConnections = "RDSReaderAverageDatabaseConnections" + case sageMakerInferenceComponentConcurrentRequestsPerCopyHighResolution = "SageMakerInferenceComponentConcurrentRequestsPerCopyHighResolution" 
case sageMakerInferenceComponentInvocationsPerCopy = "SageMakerInferenceComponentInvocationsPerCopy" + case sageMakerVariantConcurrentRequestsPerModelHighResolution = "SageMakerVariantConcurrentRequestsPerModelHighResolution" case sageMakerVariantInvocationsPerInstance = "SageMakerVariantInvocationsPerInstance" case sageMakerVariantProvisionedConcurrencyUtilization = "SageMakerVariantProvisionedConcurrencyUtilization" case workSpacesAverageUserSessionsCapacityUtilization = "WorkSpacesAverageUserSessionsCapacityUtilization" diff --git a/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_api.swift b/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_api.swift index 0209bee957..e3c92adc35 100644 --- a/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_api.swift +++ b/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS ApplicationSignals service. /// -/// This is a Preview release of the Application Signals API Reference. Operations and parameters are subject to change before the general availability release. Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. The application-centric view provides you with unified visibility across your applications, services, and dependencies, so you can proactively monitor and efficiently triage any issues that may arise, ensuring optimal customer experience. Application Signals provides the following benefits: Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors. Create and monitor service level objectives (SLOs). 
See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity. +/// Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. The application-centric view provides you with unified visibility across your applications, services, and dependencies, so you can proactively monitor and efficiently triage any issues that may arise, ensuring optimal customer experience. Application Signals provides the following benefits: Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors. Create and monitor service level objectives (SLOs). See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity. Application Signals works with CloudWatch RUM, CloudWatch Synthetics canaries, and Amazon Web Services Service Catalog AppRegistry, to display your client pages, Synthetics canaries, and application names within dashboards and maps. public struct ApplicationSignals: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_shapes.swift b/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_shapes.swift index fae3f11845..e00658efd4 100644 --- a/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_shapes.swift +++ b/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_shapes.swift @@ -309,11 +309,11 @@ extension ApplicationSignals { } public struct GetServiceInput: AWSEncodableShape { - /// The end of the time period to retrieve information about. 
When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour. public let endTime: Date /// Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. public let keyAttributes: [String: String] - /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour. public let startTime: Date public init(endTime: Date, keyAttributes: [String: String], startTime: Date) { @@ -381,21 +381,25 @@ extension ApplicationSignals { } public struct GetServiceOutput: AWSDecodableShape { - /// The end time of the data included in the response. In a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057. + /// The end time of the data included in the response. 
In a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057. This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour. public let endTime: Date + /// An array of string-to-string maps that each contain information about one log group associated with this service. Each string-to-string map includes the following fields: "Type": "AWS::Resource" "ResourceType": "AWS::Logs::LogGroup" "Identifier": "name-of-log-group" + public let logGroupReferences: [[String: String]]? /// A structure containing information about the service. public let service: Service - /// The start time of the data included in the response. In a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057. + /// The start time of the data included in the response. In a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057. This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour. public let startTime: Date - public init(endTime: Date, service: Service, startTime: Date) { + public init(endTime: Date, logGroupReferences: [[String: String]]? = nil, service: Service, startTime: Date) { self.endTime = endTime + self.logGroupReferences = logGroupReferences self.service = service self.startTime = startTime } private enum CodingKeys: String, CodingKey { case endTime = "EndTime" + case logGroupReferences = "LogGroupReferences" case service = "Service" case startTime = "StartTime" } @@ -427,7 +431,7 @@ extension ApplicationSignals { } public struct ListServiceDependenciesInput: AWSEncodableShape { - /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. 
For example: 1698778057 + /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested end time will be rounded to the nearest hour. public let endTime: Date /// Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. public let keyAttributes: [String: String] @@ -435,7 +439,7 @@ extension ApplicationSignals { public let maxResults: Int? /// Include this value, if it was returned by the previous operation, to get the next set of service dependencies. public let nextToken: String? - /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour. public let startTime: Date public init(endTime: Date, keyAttributes: [String: String], maxResults: Int? = nil, nextToken: String? 
= nil, startTime: Date) { @@ -475,13 +479,13 @@ extension ApplicationSignals { } public struct ListServiceDependenciesOutput: AWSDecodableShape { - /// The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour. public let endTime: Date /// Include this value in your next use of this API to get next set of service dependencies. public let nextToken: String? /// An array, where each object in the array contains information about one of the dependencies of this service. public let serviceDependencies: [ServiceDependency] - /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour. public let startTime: Date public init(endTime: Date, nextToken: String? = nil, serviceDependencies: [ServiceDependency], startTime: Date) { @@ -500,7 +504,7 @@ extension ApplicationSignals { } public struct ListServiceDependentsInput: AWSEncodableShape { - /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The end of the time period to retrieve information about. 
When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour. public let endTime: Date /// Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. public let keyAttributes: [String: String] @@ -508,7 +512,7 @@ extension ApplicationSignals { public let maxResults: Int? /// Include this value, if it was returned by the previous operation, to get the next set of service dependents. public let nextToken: String? - /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour. public let startTime: Date public init(endTime: Date, keyAttributes: [String: String], maxResults: Int? = nil, nextToken: String? = nil, startTime: Date) { @@ -548,13 +552,13 @@ extension ApplicationSignals { } public struct ListServiceDependentsOutput: AWSDecodableShape { - /// The end of the time period that the returned information applies to. 
When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour. public let endTime: Date /// Include this value in your next use of this API to get next set of service dependents. public let nextToken: String? /// An array, where each object in the array contains information about one of the dependents of this service. public let serviceDependents: [ServiceDependent] - /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour. public let startTime: Date public init(endTime: Date, nextToken: String? = nil, serviceDependents: [ServiceDependent], startTime: Date) { @@ -636,7 +640,7 @@ extension ApplicationSignals { } public struct ListServiceOperationsInput: AWSEncodableShape { - /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested end time will be rounded to the nearest hour. 
public let endTime: Date /// Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. public let keyAttributes: [String: String] @@ -644,7 +648,7 @@ extension ApplicationSignals { public let maxResults: Int? /// Include this value, if it was returned by the previous operation, to get the next set of service operations. public let nextToken: String? - /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour. public let startTime: Date public init(endTime: Date, keyAttributes: [String: String], maxResults: Int? = nil, nextToken: String? = nil, startTime: Date) { @@ -684,13 +688,13 @@ extension ApplicationSignals { } public struct ListServiceOperationsOutput: AWSDecodableShape { - /// The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The end of the time period that the returned information applies to. 
When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour. public let endTime: Date /// Include this value in your next use of this API to get next set of service operations. public let nextToken: String? /// An array of structures that each contain information about one operation of this service. public let serviceOperations: [ServiceOperation] - /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour. public let startTime: Date public init(endTime: Date, nextToken: String? = nil, serviceOperations: [ServiceOperation], startTime: Date) { @@ -709,13 +713,13 @@ extension ApplicationSignals { } public struct ListServicesInput: AWSEncodableShape { - /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour. public let endTime: Date /// The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used. public let maxResults: Int? /// Include this value, if it was returned by the previous operation, to get the next set of services. 
public let nextToken: String? - /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour. public let startTime: Date public init(endTime: Date, maxResults: Int? = nil, nextToken: String? = nil, startTime: Date) { @@ -743,13 +747,13 @@ extension ApplicationSignals { } public struct ListServicesOutput: AWSDecodableShape { - /// The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour. public let endTime: Date /// Include this value in your next use of this API to get next set of services. public let nextToken: String? /// An array of structures, where each structure contains some information about a service. To get complete information about a service, use GetService. public let serviceSummaries: [ServiceSummary] - /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 This displays the time that Application Signals used for the request. 
It might not match your request exactly, because it was rounded to the nearest hour. public let startTime: Date public init(endTime: Date, nextToken: String? = nil, serviceSummaries: [ServiceSummary], startTime: Date) { @@ -964,18 +968,22 @@ extension ApplicationSignals { public let attributeMaps: [[String: String]]? /// This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. public let keyAttributes: [String: String] + /// An array of string-to-string maps that each contain information about one log group associated with this service. Each string-to-string map includes the following fields: "Type": "AWS::Resource" "ResourceType": "AWS::Logs::LogGroup" "Identifier": "name-of-log-group" + public let logGroupReferences: [[String: String]]? /// An array of structures that each contain information about one metric associated with this service. public let metricReferences: [MetricReference] - public init(attributeMaps: [[String: String]]? = nil, keyAttributes: [String: String], metricReferences: [MetricReference]) { + public init(attributeMaps: [[String: String]]? = nil, keyAttributes: [String: String], logGroupReferences: [[String: String]]? 
= nil, metricReferences: [MetricReference]) { self.attributeMaps = attributeMaps self.keyAttributes = keyAttributes + self.logGroupReferences = logGroupReferences self.metricReferences = metricReferences } private enum CodingKeys: String, CodingKey { case attributeMaps = "AttributeMaps" case keyAttributes = "KeyAttributes" + case logGroupReferences = "LogGroupReferences" case metricReferences = "MetricReferences" } } diff --git a/Sources/Soto/Services/Athena/Athena_api.swift b/Sources/Soto/Services/Athena/Athena_api.swift index d5fe57a9f4..21bf3a0756 100644 --- a/Sources/Soto/Services/Athena/Athena_api.swift +++ b/Sources/Soto/Services/Athena/Athena_api.swift @@ -110,6 +110,8 @@ public struct Athena: AWSService { "us-west-2": "athena.us-west-2.api.aws" ]), [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "athena-fips.ca-central-1.api.aws", + "ca-west-1": "athena-fips.ca-west-1.api.aws", "us-east-1": "athena-fips.us-east-1.api.aws", "us-east-2": "athena-fips.us-east-2.api.aws", "us-gov-east-1": "athena-fips.us-gov-east-1.api.aws", @@ -118,6 +120,8 @@ public struct Athena: AWSService { "us-west-2": "athena-fips.us-west-2.api.aws" ]), [.fips]: .init(endpoints: [ + "ca-central-1": "athena-fips.ca-central-1.amazonaws.com", + "ca-west-1": "athena-fips.ca-west-1.amazonaws.com", "us-east-1": "athena-fips.us-east-1.amazonaws.com", "us-east-2": "athena-fips.us-east-2.amazonaws.com", "us-gov-east-1": "athena-fips.us-gov-east-1.amazonaws.com", diff --git a/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift b/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift index c7cf517aa1..f39b936e82 100644 --- a/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift +++ b/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift @@ -1228,7 +1228,7 @@ extension AutoScaling { try self.trafficSources?.forEach { try $0.validate(name: "\(name).trafficSources[]") } - try self.validate(self.vpcZoneIdentifier, name: "vpcZoneIdentifier", parent: name, max: 2047) + try 
self.validate(self.vpcZoneIdentifier, name: "vpcZoneIdentifier", parent: name, max: 5000) try self.validate(self.vpcZoneIdentifier, name: "vpcZoneIdentifier", parent: name, min: 1) try self.validate(self.vpcZoneIdentifier, name: "vpcZoneIdentifier", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") } @@ -5444,7 +5444,7 @@ extension AutoScaling { try validate($0, name: "terminationPolicies[]", parent: name, min: 1) try validate($0, name: "terminationPolicies[]", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") } - try self.validate(self.vpcZoneIdentifier, name: "vpcZoneIdentifier", parent: name, max: 2047) + try self.validate(self.vpcZoneIdentifier, name: "vpcZoneIdentifier", parent: name, max: 5000) try self.validate(self.vpcZoneIdentifier, name: "vpcZoneIdentifier", parent: name, min: 1) try self.validate(self.vpcZoneIdentifier, name: "vpcZoneIdentifier", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") } diff --git a/Sources/Soto/Services/AutoScalingPlans/AutoScalingPlans_api.swift b/Sources/Soto/Services/AutoScalingPlans/AutoScalingPlans_api.swift index 4202799230..b0d40c767b 100644 --- a/Sources/Soto/Services/AutoScalingPlans/AutoScalingPlans_api.swift +++ b/Sources/Soto/Services/AutoScalingPlans/AutoScalingPlans_api.swift @@ -61,7 +61,6 @@ public struct AutoScalingPlans: AWSService { serviceProtocol: .json(version: "1.1"), apiVersion: "2018-01-06", endpoint: endpoint, - serviceEndpoints: Self.serviceEndpoints, variantEndpoints: Self.variantEndpoints, errorType: AutoScalingPlansErrorType.self, middleware: middleware, @@ -72,11 +71,6 @@ public struct AutoScalingPlans: AWSService { } - /// custom endpoints for regions - static var serviceEndpoints: [String: String] {[ - "us-gov-east-1": "autoscaling-plans.us-gov-east-1.amazonaws.com", - "us-gov-west-1": "autoscaling-plans.us-gov-west-1.amazonaws.com" - ]} /// FIPS 
and dualstack endpoints diff --git a/Sources/Soto/Services/Batch/Batch_api.swift b/Sources/Soto/Services/Batch/Batch_api.swift index 960e3f91d0..dfb66e64f5 100644 --- a/Sources/Soto/Services/Batch/Batch_api.swift +++ b/Sources/Soto/Services/Batch/Batch_api.swift @@ -111,7 +111,7 @@ public struct Batch: AWSService { // MARK: API Calls - /// Cancels a job in an Batch job queue. Jobs that are in the SUBMITTED or PENDING are canceled. A job inRUNNABLE remains in RUNNABLE until it reaches the head of the job queue. Then the job status is updated to FAILED. A PENDING job is canceled after all dependency jobs are completed. Therefore, it may take longer than expected to cancel a job in PENDING status. When you try to cancel an array parent job in PENDING, Batch attempts to cancel all child jobs. The array parent job is canceled when all child jobs are completed. Jobs that progressed to the STARTING or RUNNING state aren't canceled. However, the API operation still succeeds, even if no job is canceled. These jobs must be terminated with the TerminateJob operation. + /// Cancels a job in an Batch job queue. Jobs that are in a SUBMITTED, PENDING, or RUNNABLE state are cancelled and the job status is updated to FAILED. A PENDING job is canceled after all dependency jobs are completed. Therefore, it may take longer than expected to cancel a job in PENDING status. When you try to cancel an array parent job in PENDING, Batch attempts to cancel all child jobs. The array parent job is canceled when all child jobs are completed. Jobs that progressed to the STARTING or RUNNING state aren't canceled. However, the API operation still succeeds, even if no job is canceled. These jobs must be terminated with the TerminateJob operation. 
@Sendable public func cancelJob(_ input: CancelJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CancelJobResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/Batch/Batch_shapes.swift b/Sources/Soto/Services/Batch/Batch_shapes.swift index f89edd5b5d..6f7efa900e 100644 --- a/Sources/Soto/Services/Batch/Batch_shapes.swift +++ b/Sources/Soto/Services/Batch/Batch_shapes.swift @@ -376,6 +376,8 @@ extension Batch { public let computeResources: ComputeResource? /// The orchestration type of the compute environment. The valid values are ECS (default) or EKS. public let containerOrchestrationType: OrchestrationType? + /// Reserved. + public let context: String? /// The Amazon Resource Name (ARN) of the underlying Amazon ECS cluster that the compute environment uses. public let ecsClusterArn: String? /// The configuration for the Amazon EKS cluster that supports the Batch compute environment. Only specify this parameter if the containerOrchestrationType is EKS. @@ -399,11 +401,12 @@ extension Batch { /// Unique identifier for the compute environment. public let uuid: String? - public init(computeEnvironmentArn: String? = nil, computeEnvironmentName: String? = nil, computeResources: ComputeResource? = nil, containerOrchestrationType: OrchestrationType? = nil, ecsClusterArn: String? = nil, eksConfiguration: EksConfiguration? = nil, serviceRole: String? = nil, state: CEState? = nil, status: CEStatus? = nil, statusReason: String? = nil, tags: [String: String]? = nil, type: CEType? = nil, unmanagedvCpus: Int? = nil, updatePolicy: UpdatePolicy? = nil, uuid: String? = nil) { + public init(computeEnvironmentArn: String? = nil, computeEnvironmentName: String? = nil, computeResources: ComputeResource? = nil, containerOrchestrationType: OrchestrationType? = nil, context: String? = nil, ecsClusterArn: String? = nil, eksConfiguration: EksConfiguration? = nil, serviceRole: String? = nil, state: CEState? = nil, status: CEStatus? 
= nil, statusReason: String? = nil, tags: [String: String]? = nil, type: CEType? = nil, unmanagedvCpus: Int? = nil, updatePolicy: UpdatePolicy? = nil, uuid: String? = nil) { self.computeEnvironmentArn = computeEnvironmentArn self.computeEnvironmentName = computeEnvironmentName self.computeResources = computeResources self.containerOrchestrationType = containerOrchestrationType + self.context = context self.ecsClusterArn = ecsClusterArn self.eksConfiguration = eksConfiguration self.serviceRole = serviceRole @@ -422,6 +425,7 @@ extension Batch { case computeEnvironmentName = "computeEnvironmentName" case computeResources = "computeResources" case containerOrchestrationType = "containerOrchestrationType" + case context = "context" case ecsClusterArn = "ecsClusterArn" case eksConfiguration = "eksConfiguration" case serviceRole = "serviceRole" @@ -958,6 +962,8 @@ extension Batch { public let computeEnvironmentName: String? /// Details about the compute resources managed by the compute environment. This parameter is required for managed compute environments. For more information, see Compute Environments in the Batch User Guide. public let computeResources: ComputeResource? + /// Reserved. + public let context: String? /// The details for the Amazon EKS cluster that supports the compute environment. public let eksConfiguration: EksConfiguration? /// The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. For more information, see Batch service IAM role in the Batch User Guide. If your account already created the Batch service-linked role, that role is used by default for your compute environment unless you specify a different role here. If the Batch service-linked role doesn't exist in your account, and no role is specified here, the service attempts to create the Batch service-linked role in your account. 
If your specified role has a path other than /, then you must specify either the full role ARN (recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/, specify /foo/bar as the role name. For more information, see Friendly names and paths in the IAM User Guide. Depending on how you created your Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments. @@ -971,9 +977,10 @@ extension Batch { /// The maximum number of vCPUs for an unmanaged compute environment. This parameter is only used for fair share scheduling to reserve vCPU capacity for new share identifiers. If this parameter isn't provided for a fair share job queue, no vCPU capacity is reserved. This parameter is only supported when the type parameter is set to UNMANAGED. public let unmanagedvCpus: Int? - public init(computeEnvironmentName: String? = nil, computeResources: ComputeResource? = nil, eksConfiguration: EksConfiguration? = nil, serviceRole: String? = nil, state: CEState? = nil, tags: [String: String]? = nil, type: CEType? = nil, unmanagedvCpus: Int? = nil) { + public init(computeEnvironmentName: String? = nil, computeResources: ComputeResource? = nil, context: String? = nil, eksConfiguration: EksConfiguration? = nil, serviceRole: String? = nil, state: CEState? = nil, tags: [String: String]? = nil, type: CEType? = nil, unmanagedvCpus: Int? 
= nil) { self.computeEnvironmentName = computeEnvironmentName self.computeResources = computeResources + self.context = context self.eksConfiguration = eksConfiguration self.serviceRole = serviceRole self.state = state @@ -996,6 +1003,7 @@ extension Batch { private enum CodingKeys: String, CodingKey { case computeEnvironmentName = "computeEnvironmentName" case computeResources = "computeResources" + case context = "context" case eksConfiguration = "eksConfiguration" case serviceRole = "serviceRole" case state = "state" @@ -3867,6 +3875,8 @@ extension Batch { public let computeEnvironment: String? /// Details of the compute resources managed by the compute environment. Required for a managed compute environment. For more information, see Compute Environments in the Batch User Guide. public let computeResources: ComputeResourceUpdate? + /// Reserved. + public let context: String? /// The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. For more information, see Batch service IAM role in the Batch User Guide. If the compute environment has a service-linked role, it can't be changed to use a regular IAM role. Likewise, if the compute environment has a regular IAM role, it can't be changed to use a service-linked role. To update the parameters for the compute environment that require an infrastructure update to change, the AWSServiceRoleForBatch service-linked role must be used. For more information, see Updating compute environments in the Batch User Guide. If your specified role has a path other than /, then you must either specify the full role ARN (recommended) or prefix the role name with the path. Depending on how you created your Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the service-role path prefix. 
Because of this, we recommend that you specify the full ARN of your service role when you create compute environments. public let serviceRole: String? /// The state of the compute environment. Compute environments in the ENABLED state can accept jobs from a queue and scale in or out automatically based on the workload demand of its associated queues. If the state is ENABLED, then the Batch scheduler can attempt to place jobs from an associated job queue on the compute resources within the environment. If the compute environment is managed, then it can scale its instances out or in automatically, based on the job queue demand. If the state is DISABLED, then the Batch scheduler doesn't attempt to place jobs within the environment. Jobs in a STARTING or RUNNING state continue to progress normally. Managed compute environments in the DISABLED state don't scale out. Compute environments in a DISABLED state may continue to incur billing charges. To prevent additional charges, turn off and then delete the compute environment. For more information, see State in the Batch User Guide. When an instance is idle, the instance scales down to the minvCpus value. However, the instance size doesn't change. For example, consider a c5.8xlarge instance with a minvCpus value of 4 and a desiredvCpus value of 36. This instance doesn't scale down to a c5.large instance. @@ -3876,9 +3886,10 @@ extension Batch { /// Specifies the updated infrastructure update policy for the compute environment. For more information about infrastructure updates, see Updating compute environments in the Batch User Guide. public let updatePolicy: UpdatePolicy? - public init(computeEnvironment: String? = nil, computeResources: ComputeResourceUpdate? = nil, serviceRole: String? = nil, state: CEState? = nil, unmanagedvCpus: Int? = nil, updatePolicy: UpdatePolicy? = nil) { + public init(computeEnvironment: String? = nil, computeResources: ComputeResourceUpdate? = nil, context: String? = nil, serviceRole: String? 
= nil, state: CEState? = nil, unmanagedvCpus: Int? = nil, updatePolicy: UpdatePolicy? = nil) { self.computeEnvironment = computeEnvironment self.computeResources = computeResources + self.context = context self.serviceRole = serviceRole self.state = state self.unmanagedvCpus = unmanagedvCpus @@ -3893,6 +3904,7 @@ extension Batch { private enum CodingKeys: String, CodingKey { case computeEnvironment = "computeEnvironment" case computeResources = "computeResources" + case context = "context" case serviceRole = "serviceRole" case state = "state" case unmanagedvCpus = "unmanagedvCpus" diff --git a/Sources/Soto/Services/Bedrock/Bedrock_api.swift b/Sources/Soto/Services/Bedrock/Bedrock_api.swift index 9a3dab3b3d..46779d04ef 100644 --- a/Sources/Soto/Services/Bedrock/Bedrock_api.swift +++ b/Sources/Soto/Services/Bedrock/Bedrock_api.swift @@ -111,7 +111,7 @@ public struct Bedrock: AWSService { // MARK: API Calls - /// API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and model evaluation jobs that use human workers. To learn more about the requirements for creating a model evaluation job see, Model evaluations. + /// API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and model evaluation jobs that use human workers. To learn more about the requirements for creating a model evaluation job see, Model evaluation. @Sendable public func createEvaluationJob(_ input: CreateEvaluationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEvaluationJobResponse { return try await self.client.execute( @@ -150,6 +150,19 @@ public struct Bedrock: AWSService { ) } + /// Copies a model to another region so that it can be used there. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide. 
+ @Sendable + public func createModelCopyJob(_ input: CreateModelCopyJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateModelCopyJobResponse { + return try await self.client.execute( + operation: "CreateModelCopyJob", + path: "/model-copy-jobs", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a fine-tuning job to customize a base model. You specify the base foundation model and the location of the training data. After the model-customization job completes successfully, your custom model resource will be ready to use. Amazon Bedrock returns validation loss metrics and output generations after the job completes. For information on the format of training and validation data, see Prepare the datasets. Model-customization jobs are asynchronous and the completion time depends on the base model and the training/validation data size. To monitor a job, use the GetModelCustomizationJob operation to retrieve the job status. For more information, see Custom models in the Amazon Bedrock User Guide. @Sendable public func createModelCustomizationJob(_ input: CreateModelCustomizationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateModelCustomizationJobResponse { @@ -241,7 +254,7 @@ public struct Bedrock: AWSService { ) } - /// Retrieves the properties associated with a model evaluation job, including the status of the job. For more information, see Model evaluations. + /// Retrieves the properties associated with a model evaluation job, including the status of the job. For more information, see Model evaluation. @Sendable public func getEvaluationJob(_ input: GetEvaluationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetEvaluationJobResponse { return try await self.client.execute( @@ -280,6 +293,19 @@ public struct Bedrock: AWSService { ) } + /// Retrieves information about a model copy job. 
For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide. + @Sendable + public func getModelCopyJob(_ input: GetModelCopyJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetModelCopyJobResponse { + return try await self.client.execute( + operation: "GetModelCopyJob", + path: "/model-copy-jobs/{jobArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves the properties associated with a model-customization job, including the status of the job. For more information, see Custom models in the Amazon Bedrock User Guide. @Sendable public func getModelCustomizationJob(_ input: GetModelCustomizationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetModelCustomizationJobResponse { @@ -371,6 +397,19 @@ public struct Bedrock: AWSService { ) } + /// Returns a list of model copy jobs that you have submitted. You can filter the jobs to return based on one or more criteria. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide. + @Sendable + public func listModelCopyJobs(_ input: ListModelCopyJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListModelCopyJobsResponse { + return try await self.client.execute( + operation: "ListModelCopyJobs", + path: "/model-copy-jobs", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a list of model customization jobs that you have submitted. You can filter the jobs to return based on one or more criteria. For more information, see Custom models in the Amazon Bedrock User Guide. @Sendable public func listModelCustomizationJobs(_ input: ListModelCustomizationJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListModelCustomizationJobsResponse { @@ -572,6 +611,25 @@ extension Bedrock { ) } + /// Returns a list of model copy jobs that you have submitted. 
You can filter the jobs to return based on one or more criteria. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listModelCopyJobsPaginator( + _ input: ListModelCopyJobsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listModelCopyJobs, + inputKey: \ListModelCopyJobsRequest.nextToken, + outputKey: \ListModelCopyJobsResponse.nextToken, + logger: logger + ) + } + /// Returns a list of model customization jobs that you have submitted. You can filter the jobs to return based on one or more criteria. For more information, see Custom models in the Amazon Bedrock User Guide. /// Return PaginatorSequence for operation. /// @@ -618,6 +676,7 @@ extension Bedrock.ListCustomModelsRequest: AWSPaginateToken { creationTimeAfter: self.creationTimeAfter, creationTimeBefore: self.creationTimeBefore, foundationModelArnEquals: self.foundationModelArnEquals, + isOwned: self.isOwned, maxResults: self.maxResults, nameContains: self.nameContains, nextToken: token, @@ -652,6 +711,23 @@ extension Bedrock.ListGuardrailsRequest: AWSPaginateToken { } } +extension Bedrock.ListModelCopyJobsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Bedrock.ListModelCopyJobsRequest { + return .init( + creationTimeAfter: self.creationTimeAfter, + creationTimeBefore: self.creationTimeBefore, + maxResults: self.maxResults, + nextToken: token, + sortBy: self.sortBy, + sortOrder: self.sortOrder, + sourceAccountEquals: self.sourceAccountEquals, + sourceModelArnEquals: self.sourceModelArnEquals, + statusEquals: self.statusEquals, + targetModelNameContains: self.targetModelNameContains + ) + } +} + extension Bedrock.ListModelCustomizationJobsRequest: AWSPaginateToken { public func 
usingPaginationToken(_ token: String) -> Bedrock.ListModelCustomizationJobsRequest { return .init( diff --git a/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift b/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift index 26e79279c8..7b8825eb9f 100644 --- a/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift +++ b/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift @@ -168,6 +168,13 @@ extension Bedrock { public var description: String { return self.rawValue } } + public enum ModelCopyJobStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case completed = "Completed" + case failed = "Failed" + case inProgress = "InProgress" + public var description: String { return self.rawValue } + } + public enum ModelCustomization: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case continuedPreTraining = "CONTINUED_PRE_TRAINING" case fineTuning = "FINE_TUNING" @@ -568,6 +575,68 @@ extension Bedrock { } } + public struct CreateModelCopyJobRequest: AWSEncodableShape { + /// A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, + /// Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. + public let clientRequestToken: String? + /// The ARN of the KMS key that you use to encrypt the model copy. + public let modelKmsKeyId: String? + /// The Amazon Resource Name (ARN) of the model to be copied. + public let sourceModelArn: String + /// A name for the copied model. + public let targetModelName: String + /// Tags to associate with the target model. For more information, see Tag resources in the Amazon Bedrock User Guide. + public let targetModelTags: [Tag]? + + public init(clientRequestToken: String? = CreateModelCopyJobRequest.idempotencyToken(), modelKmsKeyId: String? = nil, sourceModelArn: String, targetModelName: String, targetModelTags: [Tag]? 
= nil) { + self.clientRequestToken = clientRequestToken + self.modelKmsKeyId = modelKmsKeyId + self.sourceModelArn = sourceModelArn + self.targetModelName = targetModelName + self.targetModelTags = targetModelTags + } + + public func validate(name: String) throws { + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 256) + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1) + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$") + try self.validate(self.modelKmsKeyId, name: "modelKmsKeyId", parent: name, max: 2048) + try self.validate(self.modelKmsKeyId, name: "modelKmsKeyId", parent: name, min: 1) + try self.validate(self.modelKmsKeyId, name: "modelKmsKeyId", parent: name, pattern: "^arn:aws(-[^:]+)?:kms:[a-zA-Z0-9-]*:[0-9]{12}:((key/[a-zA-Z0-9-]{36})|(alias/[a-zA-Z0-9-_/]+))$") + try self.validate(self.sourceModelArn, name: "sourceModelArn", parent: name, max: 1011) + try self.validate(self.sourceModelArn, name: "sourceModelArn", parent: name, min: 20) + try self.validate(self.sourceModelArn, name: "sourceModelArn", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}))$") + try self.validate(self.targetModelName, name: "targetModelName", parent: name, max: 63) + try self.validate(self.targetModelName, name: "targetModelName", parent: name, min: 1) + try self.validate(self.targetModelName, name: "targetModelName", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,63}$") + try self.targetModelTags?.forEach { + try $0.validate(name: "\(name).targetModelTags[]") + } + try self.validate(self.targetModelTags, name: "targetModelTags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case 
clientRequestToken = "clientRequestToken" + case modelKmsKeyId = "modelKmsKeyId" + case sourceModelArn = "sourceModelArn" + case targetModelName = "targetModelName" + case targetModelTags = "targetModelTags" + } + } + + public struct CreateModelCopyJobResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the model copy job. + public let jobArn: String + + public init(jobArn: String) { + self.jobArn = jobArn + } + + private enum CodingKeys: String, CodingKey { + case jobArn = "jobArn" + } + } + public struct CreateModelCustomizationJobRequest: AWSEncodableShape { /// Name of the base model. public let baseModelIdentifier: String @@ -627,7 +696,7 @@ extension Bedrock { try self.validate(self.customModelKmsKeyId, name: "customModelKmsKeyId", parent: name, pattern: "^arn:aws(-[^:]+)?:kms:[a-zA-Z0-9-]*:[0-9]{12}:((key/[a-zA-Z0-9-]{36})|(alias/[a-zA-Z0-9-_/]+))$") try self.validate(self.customModelName, name: "customModelName", parent: name, max: 63) try self.validate(self.customModelName, name: "customModelName", parent: name, min: 1) - try self.validate(self.customModelName, name: "customModelName", parent: name, pattern: "^([0-9a-zA-Z][_-]?)+$") + try self.validate(self.customModelName, name: "customModelName", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,63}$") try self.customModelTags?.forEach { try $0.validate(name: "\(name).customModelTags[]") } @@ -755,14 +824,17 @@ extension Bedrock { public let modelArn: String /// The name of the custom model. public let modelName: String + /// The unique identifier of the account that owns the model. + public let ownerAccountId: String? - public init(baseModelArn: String, baseModelName: String, creationTime: Date, customizationType: CustomizationType? = nil, modelArn: String, modelName: String) { + public init(baseModelArn: String, baseModelName: String, creationTime: Date, customizationType: CustomizationType? = nil, modelArn: String, modelName: String, ownerAccountId: String? 
= nil) { self.baseModelArn = baseModelArn self.baseModelName = baseModelName self.creationTime = creationTime self.customizationType = customizationType self.modelArn = modelArn self.modelName = modelName + self.ownerAccountId = ownerAccountId } private enum CodingKeys: String, CodingKey { @@ -772,6 +844,7 @@ extension Bedrock { case customizationType = "customizationType" case modelArn = "modelArn" case modelName = "modelName" + case ownerAccountId = "ownerAccountId" } } @@ -1435,6 +1508,82 @@ extension Bedrock { } } + public struct GetModelCopyJobRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the model copy job. + public let jobArn: String + + public init(jobArn: String) { + self.jobArn = jobArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.jobArn, key: "jobArn") + } + + public func validate(name: String) throws { + try self.validate(self.jobArn, name: "jobArn", parent: name, max: 1011) + try self.validate(self.jobArn, name: "jobArn", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:model-copy-job/[a-z0-9]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetModelCopyJobResponse: AWSDecodableShape { + /// The time at which the model copy job was created. + @CustomCoding + public var creationTime: Date + /// An error message for why the model copy job failed. + public let failureMessage: String? + /// The Amazon Resource Name (ARN) of the model copy job. + public let jobArn: String + /// The unique identifier of the account that the model being copied originated from. + public let sourceAccountId: String + /// The Amazon Resource Name (ARN) of the original model being copied. + public let sourceModelArn: String + /// The name of the original model being copied. + public let sourceModelName: String? 
+ /// The status of the model copy job. + public let status: ModelCopyJobStatus + /// The Amazon Resource Name (ARN) of the copied model. + public let targetModelArn: String + /// The Amazon Resource Name (ARN) of the KMS key encrypting the copied model. + public let targetModelKmsKeyArn: String? + /// The name of the copied model. + public let targetModelName: String? + /// The tags associated with the copied model. + public let targetModelTags: [Tag]? + + public init(creationTime: Date, failureMessage: String? = nil, jobArn: String, sourceAccountId: String, sourceModelArn: String, sourceModelName: String? = nil, status: ModelCopyJobStatus, targetModelArn: String, targetModelKmsKeyArn: String? = nil, targetModelName: String? = nil, targetModelTags: [Tag]? = nil) { + self.creationTime = creationTime + self.failureMessage = failureMessage + self.jobArn = jobArn + self.sourceAccountId = sourceAccountId + self.sourceModelArn = sourceModelArn + self.sourceModelName = sourceModelName + self.status = status + self.targetModelArn = targetModelArn + self.targetModelKmsKeyArn = targetModelKmsKeyArn + self.targetModelName = targetModelName + self.targetModelTags = targetModelTags + } + + private enum CodingKeys: String, CodingKey { + case creationTime = "creationTime" + case failureMessage = "failureMessage" + case jobArn = "jobArn" + case sourceAccountId = "sourceAccountId" + case sourceModelArn = "sourceModelArn" + case sourceModelName = "sourceModelName" + case status = "status" + case targetModelArn = "targetModelArn" + case targetModelKmsKeyArn = "targetModelKmsKeyArn" + case targetModelName = "targetModelName" + case targetModelTags = "targetModelTags" + } + } + public struct GetModelCustomizationJobRequest: AWSEncodableShape { /// Identifier for the customization job. public let jobIdentifier: String @@ -1821,7 +1970,7 @@ extension Bedrock { public struct GuardrailPiiEntity: AWSDecodableShape { /// The configured guardrail action when PII entity is detected. 
public let action: GuardrailSensitiveInformationAction - /// The type of PII entity. For example, Social Security Number. + /// The type of PII entity. For exampvle, Social Security Number. public let type: GuardrailPiiEntityType public init(action: GuardrailSensitiveInformationAction, type: GuardrailPiiEntityType) { @@ -2251,22 +2400,25 @@ extension Bedrock { public var creationTimeBefore: Date? /// Return custom models only if the foundation model Amazon Resource Name (ARN) matches this parameter. public let foundationModelArnEquals: String? - /// Maximum number of results to return in the response. + /// Return custom models depending on if the current account owns them (true) or if they were shared with the current account (false). + public let isOwned: Bool? + /// The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. public let maxResults: Int? /// Return custom models only if the job name contains these characters. public let nameContains: String? - /// Continuation token from the previous response, for Amazon Bedrock to list the next set of results. + /// If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. public let nextToken: String? /// The field to sort by in the returned list of models. public let sortBy: SortModelsBy? /// The sort order of the results. public let sortOrder: SortOrder? - public init(baseModelArnEquals: String? = nil, creationTimeAfter: Date? = nil, creationTimeBefore: Date? = nil, foundationModelArnEquals: String? = nil, maxResults: Int? = nil, nameContains: String? = nil, nextToken: String? = nil, sortBy: SortModelsBy? = nil, sortOrder: SortOrder? = nil) { + public init(baseModelArnEquals: String? 
= nil, creationTimeAfter: Date? = nil, creationTimeBefore: Date? = nil, foundationModelArnEquals: String? = nil, isOwned: Bool? = nil, maxResults: Int? = nil, nameContains: String? = nil, nextToken: String? = nil, sortBy: SortModelsBy? = nil, sortOrder: SortOrder? = nil) { self.baseModelArnEquals = baseModelArnEquals self.creationTimeAfter = creationTimeAfter self.creationTimeBefore = creationTimeBefore self.foundationModelArnEquals = foundationModelArnEquals + self.isOwned = isOwned self.maxResults = maxResults self.nameContains = nameContains self.nextToken = nextToken @@ -2281,6 +2433,7 @@ extension Bedrock { request.encodeQuery(self._creationTimeAfter, key: "creationTimeAfter") request.encodeQuery(self._creationTimeBefore, key: "creationTimeBefore") request.encodeQuery(self.foundationModelArnEquals, key: "foundationModelArnEquals") + request.encodeQuery(self.isOwned, key: "isOwned") request.encodeQuery(self.maxResults, key: "maxResults") request.encodeQuery(self.nameContains, key: "nameContains") request.encodeQuery(self.nextToken, key: "nextToken") @@ -2297,7 +2450,7 @@ extension Bedrock { try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nameContains, name: "nameContains", parent: name, max: 63) try self.validate(self.nameContains, name: "nameContains", parent: name, min: 1) - try self.validate(self.nameContains, name: "nameContains", parent: name, pattern: "^([0-9a-zA-Z][_-]?)+$") + try self.validate(self.nameContains, name: "nameContains", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,63}$") try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S*$") @@ -2309,7 +2462,7 @@ extension Bedrock { public struct ListCustomModelsResponse: AWSDecodableShape { /// Model summaries. public let modelSummaries: [CustomModelSummary]? 
- /// Continuation token for the next request to list the next set of results. + /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. public let nextToken: String? public init(modelSummaries: [CustomModelSummary]? = nil, nextToken: String? = nil) { @@ -2496,6 +2649,93 @@ extension Bedrock { } } + public struct ListModelCopyJobsRequest: AWSEncodableShape { + /// Filters for model copy jobs created after the specified time. + @OptionalCustomCoding + public var creationTimeAfter: Date? + /// Filters for model copy jobs created before the specified time. + @OptionalCustomCoding + public var creationTimeBefore: Date? + /// The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. + public let maxResults: Int? + /// If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. + public let nextToken: String? + /// The field to sort by in the returned list of model copy jobs. + public let sortBy: SortJobsBy? + /// Specifies whether to sort the results in ascending or descending order. + public let sortOrder: SortOrder? + /// Filters for model copy jobs in which the account that the source model belongs to is equal to the value that you specify. + public let sourceAccountEquals: String? + /// Filters for model copy jobs in which the Amazon Resource Name (ARN) of the source model to is equal to the value that you specify. + public let sourceModelArnEquals: String? + /// Filters for model copy jobs whose status matches the value that you specify. + public let statusEquals: ModelCopyJobStatus? 
+ /// Filters for model copy jobs in which the name of the copied model contains the string that you specify. + public let targetModelNameContains: String? + + public init(creationTimeAfter: Date? = nil, creationTimeBefore: Date? = nil, maxResults: Int? = nil, nextToken: String? = nil, sortBy: SortJobsBy? = nil, sortOrder: SortOrder? = nil, sourceAccountEquals: String? = nil, sourceModelArnEquals: String? = nil, statusEquals: ModelCopyJobStatus? = nil, targetModelNameContains: String? = nil) { + self.creationTimeAfter = creationTimeAfter + self.creationTimeBefore = creationTimeBefore + self.maxResults = maxResults + self.nextToken = nextToken + self.sortBy = sortBy + self.sortOrder = sortOrder + self.sourceAccountEquals = sourceAccountEquals + self.sourceModelArnEquals = sourceModelArnEquals + self.statusEquals = statusEquals + self.targetModelNameContains = targetModelNameContains + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self._creationTimeAfter, key: "creationTimeAfter") + request.encodeQuery(self._creationTimeBefore, key: "creationTimeBefore") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.sortBy, key: "sortBy") + request.encodeQuery(self.sortOrder, key: "sortOrder") + request.encodeQuery(self.sourceAccountEquals, key: "sourceAccountEquals") + request.encodeQuery(self.sourceModelArnEquals, key: "sourceModelArnEquals") + request.encodeQuery(self.statusEquals, key: "statusEquals") + request.encodeQuery(self.targetModelNameContains, key: "outputModelNameContains") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S*$") + try self.validate(self.sourceAccountEquals, name: "sourceAccountEquals", parent: name, pattern: "^[0-9]{12}$") + try self.validate(self.sourceModelArnEquals, name: "sourceModelArnEquals", parent: name, max: 1011) + try self.validate(self.sourceModelArnEquals, name: "sourceModelArnEquals", parent: name, min: 20) + try self.validate(self.sourceModelArnEquals, name: "sourceModelArnEquals", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}))$") + try self.validate(self.targetModelNameContains, name: "targetModelNameContains", parent: name, max: 63) + try 
self.validate(self.targetModelNameContains, name: "targetModelNameContains", parent: name, min: 1) + try self.validate(self.targetModelNameContains, name: "targetModelNameContains", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,63}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListModelCopyJobsResponse: AWSDecodableShape { + /// A list of information about each model copy job. + public let modelCopyJobSummaries: [ModelCopyJobSummary]? + /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. + public let nextToken: String? + + public init(modelCopyJobSummaries: [ModelCopyJobSummary]? = nil, nextToken: String? = nil) { + self.modelCopyJobSummaries = modelCopyJobSummaries + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case modelCopyJobSummaries = "modelCopyJobSummaries" + case nextToken = "nextToken" + } + } + public struct ListModelCustomizationJobsRequest: AWSEncodableShape { /// Return customization jobs created after the specified time. @OptionalCustomCoding @@ -2503,11 +2743,11 @@ extension Bedrock { /// Return customization jobs created before the specified time. @OptionalCustomCoding public var creationTimeBefore: Date? - /// Maximum number of results to return in the response. + /// The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. public let maxResults: Int? /// Return customization jobs only if the job name contains these characters. public let nameContains: String? - /// Continuation token from the previous response, for Amazon Bedrock to list the next set of results. 
+ /// If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. public let nextToken: String? /// The field to sort by in the returned list of jobs. public let sortBy: SortJobsBy? @@ -2557,7 +2797,7 @@ extension Bedrock { public struct ListModelCustomizationJobsResponse: AWSDecodableShape { /// Job summaries. public let modelCustomizationJobSummaries: [ModelCustomizationJobSummary]? - /// Page continuation token to use in the next request. + /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. public let nextToken: String? public init(modelCustomizationJobSummaries: [ModelCustomizationJobSummary]? = nil, nextToken: String? = nil) { @@ -2719,6 +2959,60 @@ extension Bedrock { } } + public struct ModelCopyJobSummary: AWSDecodableShape { + /// The time that the model copy job was created. + @CustomCoding + public var creationTime: Date + /// If a model fails to be copied, a message describing why the job failed is included here. + public let failureMessage: String? + /// The Amazon Resoource Name (ARN) of the model copy job. + public let jobArn: String + /// The unique identifier of the account that the model being copied originated from. + public let sourceAccountId: String + /// The Amazon Resource Name (ARN) of the original model being copied. + public let sourceModelArn: String + /// The name of the original model being copied. + public let sourceModelName: String? + /// The status of the model copy job. + public let status: ModelCopyJobStatus + /// The Amazon Resource Name (ARN) of the copied model. + public let targetModelArn: String + /// The Amazon Resource Name (ARN) of the KMS key used to encrypt the copied model. + public let targetModelKmsKeyArn: String? 
+ /// The name of the copied model. + public let targetModelName: String? + /// Tags associated with the copied model. + public let targetModelTags: [Tag]? + + public init(creationTime: Date, failureMessage: String? = nil, jobArn: String, sourceAccountId: String, sourceModelArn: String, sourceModelName: String? = nil, status: ModelCopyJobStatus, targetModelArn: String, targetModelKmsKeyArn: String? = nil, targetModelName: String? = nil, targetModelTags: [Tag]? = nil) { + self.creationTime = creationTime + self.failureMessage = failureMessage + self.jobArn = jobArn + self.sourceAccountId = sourceAccountId + self.sourceModelArn = sourceModelArn + self.sourceModelName = sourceModelName + self.status = status + self.targetModelArn = targetModelArn + self.targetModelKmsKeyArn = targetModelKmsKeyArn + self.targetModelName = targetModelName + self.targetModelTags = targetModelTags + } + + private enum CodingKeys: String, CodingKey { + case creationTime = "creationTime" + case failureMessage = "failureMessage" + case jobArn = "jobArn" + case sourceAccountId = "sourceAccountId" + case sourceModelArn = "sourceModelArn" + case sourceModelName = "sourceModelName" + case status = "status" + case targetModelArn = "targetModelArn" + case targetModelKmsKeyArn = "targetModelKmsKeyArn" + case targetModelName = "targetModelName" + case targetModelTags = "targetModelTags" + } + } + public struct ModelCustomizationJobSummary: AWSDecodableShape { /// Amazon Resource Name (ARN) of the base model. public let baseModelArn: String @@ -3412,7 +3706,7 @@ public struct BedrockErrorType: AWSErrorType { public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } /// The number of requests exceeds the limit. Resubmit your request later. public static var throttlingException: Self { .init(.throttlingException) } - /// The request contains more tags than can be associated with a resource (50 tags per resource). 
The maximum number of tags includes both existing tags and those included in your current request. + /// The request contains more tags than can be associated with a resource (50 tags per resource). The maximum number of tags includes both existing tags and those included in your current request. public static var tooManyTagsException: Self { .init(.tooManyTagsException) } /// Input validation failed. Check your request parameters and retry the request. public static var validationException: Self { .init(.validationException) } diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift index 1af7eda121..9edffda05d 100644 --- a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift @@ -113,7 +113,7 @@ public struct BedrockAgentRuntime: AWSService { ) } - /// Invokes an alias of a flow to run the inputs that you specify and return the output of each node as a stream. If there's an error, the error is returned. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + /// Invokes an alias of a flow to run the inputs that you specify and return the output of each node as a stream. If there's an error, the error is returned. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide. The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeFlow. 
@Sendable public func invokeFlow(_ input: InvokeFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InvokeFlowResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift index 9d51cae626..483ad721a5 100644 --- a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift @@ -380,6 +380,8 @@ extension BedrockAgentRuntime { case invocationInput(InvocationInput) /// The input for the orchestration step. The type is ORCHESTRATION. The text contains the prompt. The inferenceConfiguration, parserMode, and overrideLambda values are set in the PromptOverrideConfiguration object that was set when the agent was created or updated. case modelInvocationInput(ModelInvocationInput) + /// Contains information pertaining to the output from the foundation model that is being invoked. + case modelInvocationOutput(OrchestrationModelInvocationOutput) /// Details about the observation (the output of the action group Lambda or knowledge base) made by the agent. case observation(Observation) /// Details about the reasoning, based on the input, that the agent uses to justify carrying out an action group or getting information from a knowledge base. 
@@ -401,6 +403,9 @@ extension BedrockAgentRuntime { case .modelInvocationInput: let value = try container.decode(ModelInvocationInput.self, forKey: .modelInvocationInput) self = .modelInvocationInput(value) + case .modelInvocationOutput: + let value = try container.decode(OrchestrationModelInvocationOutput.self, forKey: .modelInvocationOutput) + self = .modelInvocationOutput(value) case .observation: let value = try container.decode(Observation.self, forKey: .observation) self = .observation(value) @@ -413,6 +418,7 @@ extension BedrockAgentRuntime { private enum CodingKeys: String, CodingKey { case invocationInput = "invocationInput" case modelInvocationInput = "modelInvocationInput" + case modelInvocationOutput = "modelInvocationOutput" case observation = "observation" case rationale = "rationale" } @@ -1323,11 +1329,11 @@ extension BedrockAgentRuntime { } public struct FlowInput: AWSEncodableShape { - /// Contains information about an input into the flow. + /// Contains information about an input into the prompt flow. public let content: FlowInputContent - /// A name for the input of the flow input node. + /// The name of the flow input node that begins the prompt flow. public let nodeName: String - /// A name for the output of the flow input node. + /// The name of the output from the flow input node that begins the prompt flow. public let nodeOutputName: String public init(content: FlowInputContent, nodeName: String, nodeOutputName: String) { @@ -1349,11 +1355,11 @@ extension BedrockAgentRuntime { } public struct FlowOutputEvent: AWSDecodableShape { - /// The output of the node. + /// The content in the output. public let content: FlowOutputContent - /// The name of the node to which input was provided. + /// The name of the flow output node that the output is from. public let nodeName: String - /// The type of node to which input was provided. + /// The type of the node that the output is from. 
public let nodeType: NodeType public init(content: FlowOutputContent, nodeName: String, nodeType: NodeType) { @@ -1831,7 +1837,7 @@ extension BedrockAgentRuntime { public let temperature: Float? /// While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for topK is the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topK to 50, the model selects the next token from among the top 50 most likely choices. public let topK: Int? - /// While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for Top P determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topP to 80, the model only selects the next token from the top 80% of the probability distribution of next tokens. + /// While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for Top P determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topP to 0.8, the model only selects the next token from the top 80% of the probability distribution of next tokens. public let topP: Float? public init(maximumLength: Int? = nil, stopSequences: [String]? = nil, temperature: Float? = nil, topK: Int? = nil, topP: Float? = nil) { @@ -2272,6 +2278,19 @@ extension BedrockAgentRuntime { } } + public struct Metadata: AWSDecodableShape { + /// Contains details of the foundation model usage. + public let usage: Usage? + + public init(usage: Usage? 
= nil) { + self.usage = usage + } + + private enum CodingKeys: String, CodingKey { + case usage = "usage" + } + } + public struct ModelInvocationInput: AWSDecodableShape { /// Specifications about the inference parameters that were provided alongside the prompt. These are specified in the PromptOverrideConfiguration object that was set when the agent was created or updated. For more information, see Inference parameters for foundation models. public let inferenceConfiguration: InferenceConfiguration? @@ -2359,6 +2378,27 @@ extension BedrockAgentRuntime { } } + public struct OrchestrationModelInvocationOutput: AWSDecodableShape { + /// Contains information about the foundation model output. + public let metadata: Metadata? + /// Contains details of the raw response from the foundation model output. + public let rawResponse: RawResponse? + /// The unique identifier of the trace. + public let traceId: String? + + public init(metadata: Metadata? = nil, rawResponse: RawResponse? = nil, traceId: String? = nil) { + self.metadata = metadata + self.rawResponse = rawResponse + self.traceId = traceId + } + + private enum CodingKeys: String, CodingKey { + case metadata = "metadata" + case rawResponse = "rawResponse" + case traceId = "traceId" + } + } + public struct OutputFile: AWSDecodableShape { /// The byte count of files that contains response from code interpreter. public let bytes: AWSBase64Data? @@ -2543,6 +2583,19 @@ extension BedrockAgentRuntime { } } + public struct RawResponse: AWSDecodableShape { + /// The foundation model's raw output content. + public let content: String? + + public init(content: String? = nil) { + self.content = content + } + + private enum CodingKeys: String, CodingKey { + case content = "content" + } + } + public struct RepromptResponse: AWSDecodableShape { /// Specifies what output is prompting the agent to reprompt the input. public let source: Source? 
@@ -3127,6 +3180,23 @@ extension BedrockAgentRuntime { } } + public struct Usage: AWSDecodableShape { + /// Contains information about the input tokens from the foundation model usage. + public let inputTokens: Int? + /// Contains information about the output tokens from the foundation model usage. + public let outputTokens: Int? + + public init(inputTokens: Int? = nil, outputTokens: Int? = nil) { + self.inputTokens = inputTokens + self.outputTokens = outputTokens + } + + private enum CodingKeys: String, CodingKey { + case inputTokens = "inputTokens" + case outputTokens = "outputTokens" + } + } + public struct ValidationException: AWSDecodableShape { public let message: String? @@ -3140,7 +3210,7 @@ extension BedrockAgentRuntime { } public struct FlowInputContent: AWSEncodableShape { - /// The input for the flow input node. + /// The input to send to the prompt flow input node. public let document: String? public init(document: String? = nil) { @@ -3153,7 +3223,7 @@ extension BedrockAgentRuntime { } public struct FlowOutputContent: AWSDecodableShape { - /// A name for the output of the flow. + /// The content in the output. public let document: String? public init(document: String? = nil) { diff --git a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift index 7a18162def..fc5c4aebbb 100644 --- a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift +++ b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift @@ -87,7 +87,7 @@ public struct BedrockRuntime: AWSService { ) } - /// Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. 
For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, see Converse API examples in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action. + /// Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. If a model has unique inference parameters, you can also pass those unique parameters to the model. Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, see Converse API examples in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action. @Sendable public func converse(_ input: ConverseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ConverseResponse { return try await self.client.execute( @@ -100,7 +100,7 @@ public struct BedrockRuntime: AWSService { ) } - /// Sends messages to the specified Amazon Bedrock model and returns the response in a stream. ConverseStream provides a consistent API that works with all Amazon Bedrock models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. 
To find out if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported field in the response. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, see Conversation streaming example in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModelWithResponseStream action. + /// Sends messages to the specified Amazon Bedrock model and returns the response in a stream. ConverseStream provides a consistent API that works with all Amazon Bedrock models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. To find out if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported field in the response. The CLI doesn't support streaming operations in Amazon Bedrock, including ConverseStream. Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, see Conversation streaming example in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModelWithResponseStream action. 
@Sendable public func converseStream(_ input: ConverseStreamRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ConverseStreamResponse { return try await self.client.execute( @@ -126,7 +126,7 @@ public struct BedrockRuntime: AWSService { ) } - /// Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream. To see if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported field in the response. The CLI doesn't support InvokeModelWithResponseStream. For example code, see Invoke model with streaming code example in the Amazon Bedrock User Guide. This operation requires permissions to perform the bedrock:InvokeModelWithResponseStream action. + /// Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream. To see if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported field in the response. The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeModelWithResponseStream. For example code, see Invoke model with streaming code example in the Amazon Bedrock User Guide. This operation requires permissions to perform the bedrock:InvokeModelWithResponseStream action. 
@Sendable public func invokeModelWithResponseStream(_ input: InvokeModelWithResponseStreamRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InvokeModelWithResponseStreamResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift index d8d218afbb..b8b11c78d5 100644 --- a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift @@ -342,6 +342,8 @@ extension BedrockRuntime { case metadata(ConverseStreamMetadataEvent) /// A streaming error occurred. Retry your request. case modelStreamErrorException(ModelStreamErrorException) + /// The service isn't currently available. Try again later. + case serviceUnavailableException(ServiceUnavailableException) /// The number of requests exceeds the limit. Resubmit your request later. case throttlingException(ThrottlingException) /// Input validation failed. Check your request parameters and retry the request. 
@@ -381,6 +383,9 @@ extension BedrockRuntime { case .modelStreamErrorException: let value = try container.decode(ModelStreamErrorException.self, forKey: .modelStreamErrorException) self = .modelStreamErrorException(value) + case .serviceUnavailableException: + let value = try container.decode(ServiceUnavailableException.self, forKey: .serviceUnavailableException) + self = .serviceUnavailableException(value) case .throttlingException: let value = try container.decode(ThrottlingException.self, forKey: .throttlingException) self = .throttlingException(value) @@ -399,6 +404,7 @@ extension BedrockRuntime { case messageStop = "messageStop" case metadata = "metadata" case modelStreamErrorException = "modelStreamErrorException" + case serviceUnavailableException = "serviceUnavailableException" case throttlingException = "throttlingException" case validationException = "validationException" } @@ -413,7 +419,8 @@ extension BedrockRuntime { case modelStreamErrorException(ModelStreamErrorException) /// The request took too long to process. Processing time exceeded the model timeout length. case modelTimeoutException(ModelTimeoutException) - /// The number or frequency of requests exceeds the limit. Resubmit your request later. + case serviceUnavailableException(ServiceUnavailableException) + /// Your request was throttled because of service-wide limitations. Resubmit your request later or in a different region. You can also purchase Provisioned Throughput to increase the rate or number of tokens you can process. case throttlingException(ThrottlingException) /// Input validation failed. Check your request parameters and retry the request. 
case validationException(ValidationException) @@ -440,6 +447,9 @@ extension BedrockRuntime { case .modelTimeoutException: let value = try container.decode(ModelTimeoutException.self, forKey: .modelTimeoutException) self = .modelTimeoutException(value) + case .serviceUnavailableException: + let value = try container.decode(ServiceUnavailableException.self, forKey: .serviceUnavailableException) + self = .serviceUnavailableException(value) case .throttlingException: let value = try container.decode(ThrottlingException.self, forKey: .throttlingException) self = .throttlingException(value) @@ -454,6 +464,7 @@ extension BedrockRuntime { case internalServerException = "internalServerException" case modelStreamErrorException = "modelStreamErrorException" case modelTimeoutException = "modelTimeoutException" + case serviceUnavailableException = "serviceUnavailableException" case throttlingException = "throttlingException" case validationException = "validationException" } @@ -1701,6 +1712,18 @@ extension BedrockRuntime { } } + public struct ServiceUnavailableException: AWSDecodableShape { + public let message: String? + + public init(message: String? = nil) { + self.message = message + } + + private enum CodingKeys: String, CodingKey { + case message = "message" + } + } + public struct SpecificToolChoice: AWSEncodableShape { /// The name of the tool that the model must request. 
public let name: String @@ -2026,6 +2049,7 @@ public struct BedrockRuntimeErrorType: AWSErrorType { case modelTimeoutException = "ModelTimeoutException" case resourceNotFoundException = "ResourceNotFoundException" case serviceQuotaExceededException = "ServiceQuotaExceededException" + case serviceUnavailableException = "ServiceUnavailableException" case throttlingException = "ThrottlingException" case validationException = "ValidationException" } @@ -2062,9 +2086,11 @@ public struct BedrockRuntimeErrorType: AWSErrorType { public static var modelTimeoutException: Self { .init(.modelTimeoutException) } /// The specified resource ARN was not found. Check the ARN and try your request again. public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } - /// The number of requests exceeds the service quota. Resubmit your request later. + /// Your request exceeds the service quota for your account. You can view your quotas at Viewing service quotas. You can resubmit your request later. public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } - /// The number of requests exceeds the limit. Resubmit your request later. + /// The service isn't currently available. Try again later. + public static var serviceUnavailableException: Self { .init(.serviceUnavailableException) } + /// Your request was throttled because of service-wide limitations. Resubmit your request later or in a different region. You can also purchase Provisioned Throughput to increase the rate or number of tokens you can process. public static var throttlingException: Self { .init(.throttlingException) } /// Input validation failed. Check your request parameters and retry the request. 
public static var validationException: Self { .init(.validationException) } diff --git a/Sources/Soto/Services/CleanRooms/CleanRooms_api.swift b/Sources/Soto/Services/CleanRooms/CleanRooms_api.swift index 2ddedff1bb..ad007b246b 100644 --- a/Sources/Soto/Services/CleanRooms/CleanRooms_api.swift +++ b/Sources/Soto/Services/CleanRooms/CleanRooms_api.swift @@ -190,6 +190,45 @@ public struct CleanRooms: AWSService { ) } + /// Creates a new analysis rule for an associated configured table. + @Sendable + public func createConfiguredTableAssociationAnalysisRule(_ input: CreateConfiguredTableAssociationAnalysisRuleInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateConfiguredTableAssociationAnalysisRuleOutput { + return try await self.client.execute( + operation: "CreateConfiguredTableAssociationAnalysisRule", + path: "/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates an ID mapping table. + @Sendable + public func createIdMappingTable(_ input: CreateIdMappingTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateIdMappingTableOutput { + return try await self.client.execute( + operation: "CreateIdMappingTable", + path: "/memberships/{membershipIdentifier}/idmappingtables", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates an ID namespace association. 
+ @Sendable + public func createIdNamespaceAssociation(_ input: CreateIdNamespaceAssociationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateIdNamespaceAssociationOutput { + return try await self.client.execute( + operation: "CreateIdNamespaceAssociation", + path: "/memberships/{membershipIdentifier}/idnamespaceassociations", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a membership for a specific collaboration identifier and joins the collaboration. @Sendable public func createMembership(_ input: CreateMembershipInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMembershipOutput { @@ -294,6 +333,45 @@ public struct CleanRooms: AWSService { ) } + /// Deletes an analysis rule for a configured table association. + @Sendable + public func deleteConfiguredTableAssociationAnalysisRule(_ input: DeleteConfiguredTableAssociationAnalysisRuleInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteConfiguredTableAssociationAnalysisRuleOutput { + return try await self.client.execute( + operation: "DeleteConfiguredTableAssociationAnalysisRule", + path: "/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule/{analysisRuleType}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes an ID mapping table. + @Sendable + public func deleteIdMappingTable(_ input: DeleteIdMappingTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteIdMappingTableOutput { + return try await self.client.execute( + operation: "DeleteIdMappingTable", + path: "/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes an ID namespace association. 
+ @Sendable + public func deleteIdNamespaceAssociation(_ input: DeleteIdNamespaceAssociationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteIdNamespaceAssociationOutput { + return try await self.client.execute( + operation: "DeleteIdNamespaceAssociation", + path: "/memberships/{membershipIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Removes the specified member from a collaboration. The removed member is placed in the Removed status and can't interact with the collaboration. The removed member's data is inaccessible to active members of the collaboration. @Sendable public func deleteMember(_ input: DeleteMemberInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteMemberOutput { @@ -385,6 +463,19 @@ public struct CleanRooms: AWSService { ) } + /// Retrieves an ID namespace association from a specific collaboration. + @Sendable + public func getCollaborationIdNamespaceAssociation(_ input: GetCollaborationIdNamespaceAssociationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetCollaborationIdNamespaceAssociationOutput { + return try await self.client.execute( + operation: "GetCollaborationIdNamespaceAssociation", + path: "/collaborations/{collaborationIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns details about a specified privacy budget template. @Sendable public func getCollaborationPrivacyBudgetTemplate(_ input: GetCollaborationPrivacyBudgetTemplateInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetCollaborationPrivacyBudgetTemplateOutput { @@ -450,6 +541,45 @@ public struct CleanRooms: AWSService { ) } + /// Retrieves the analysis rule for a configured table association. 
+ @Sendable + public func getConfiguredTableAssociationAnalysisRule(_ input: GetConfiguredTableAssociationAnalysisRuleInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetConfiguredTableAssociationAnalysisRuleOutput { + return try await self.client.execute( + operation: "GetConfiguredTableAssociationAnalysisRule", + path: "/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule/{analysisRuleType}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves an ID mapping table. + @Sendable + public func getIdMappingTable(_ input: GetIdMappingTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetIdMappingTableOutput { + return try await self.client.execute( + operation: "GetIdMappingTable", + path: "/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves an ID namespace association. + @Sendable + public func getIdNamespaceAssociation(_ input: GetIdNamespaceAssociationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetIdNamespaceAssociationOutput { + return try await self.client.execute( + operation: "GetIdNamespaceAssociation", + path: "/memberships/{membershipIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves a specified membership for an identifier. @Sendable public func getMembership(_ input: GetMembershipInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetMembershipOutput { @@ -554,6 +684,19 @@ public struct CleanRooms: AWSService { ) } + /// Returns a list of the ID namespace associations in a collaboration. 
+ @Sendable + public func listCollaborationIdNamespaceAssociations(_ input: ListCollaborationIdNamespaceAssociationsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCollaborationIdNamespaceAssociationsOutput { + return try await self.client.execute( + operation: "ListCollaborationIdNamespaceAssociations", + path: "/collaborations/{collaborationIdentifier}/idnamespaceassociations", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns an array that summarizes each privacy budget template in a specified collaboration. @Sendable public func listCollaborationPrivacyBudgetTemplates(_ input: ListCollaborationPrivacyBudgetTemplatesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCollaborationPrivacyBudgetTemplatesOutput { @@ -632,6 +775,32 @@ public struct CleanRooms: AWSService { ) } + /// Returns a list of ID mapping tables. + @Sendable + public func listIdMappingTables(_ input: ListIdMappingTablesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListIdMappingTablesOutput { + return try await self.client.execute( + operation: "ListIdMappingTables", + path: "/memberships/{membershipIdentifier}/idmappingtables", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of ID namespace associations. + @Sendable + public func listIdNamespaceAssociations(_ input: ListIdNamespaceAssociationsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListIdNamespaceAssociationsOutput { + return try await self.client.execute( + operation: "ListIdNamespaceAssociations", + path: "/memberships/{membershipIdentifier}/idnamespaceassociations", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists all members within a collaboration. 
@Sendable public func listMembers(_ input: ListMembersInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListMembersOutput { @@ -723,6 +892,19 @@ public struct CleanRooms: AWSService { ) } + /// Defines the information that's necessary to populate an ID mapping table. + @Sendable + public func populateIdMappingTable(_ input: PopulateIdMappingTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> PopulateIdMappingTableOutput { + return try await self.client.execute( + operation: "PopulateIdMappingTable", + path: "/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}/populate", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// An estimate of the number of aggregation functions that the member who can query can run given epsilon and noise parameters. @Sendable public func previewPrivacyImpact(_ input: PreviewPrivacyImpactInput, logger: Logger = AWSClient.loggingDisabled) async throws -> PreviewPrivacyImpactOutput { @@ -853,6 +1035,45 @@ public struct CleanRooms: AWSService { ) } + /// Updates the analysis rule for a configured table association. + @Sendable + public func updateConfiguredTableAssociationAnalysisRule(_ input: UpdateConfiguredTableAssociationAnalysisRuleInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateConfiguredTableAssociationAnalysisRuleOutput { + return try await self.client.execute( + operation: "UpdateConfiguredTableAssociationAnalysisRule", + path: "/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule/{analysisRuleType}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Provides the details that are necessary to update an ID mapping table. 
+ @Sendable + public func updateIdMappingTable(_ input: UpdateIdMappingTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateIdMappingTableOutput { + return try await self.client.execute( + operation: "UpdateIdMappingTable", + path: "/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Provides the details that are necessary to update an ID namespace association. + @Sendable + public func updateIdNamespaceAssociation(_ input: UpdateIdNamespaceAssociationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateIdNamespaceAssociationOutput { + return try await self.client.execute( + operation: "UpdateIdNamespaceAssociation", + path: "/memberships/{membershipIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates a membership. @Sendable public func updateMembership(_ input: UpdateMembershipInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateMembershipOutput { @@ -925,6 +1146,25 @@ extension CleanRooms { ) } + /// Returns a list of the ID namespace associations in a collaboration. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listCollaborationIdNamespaceAssociationsPaginator( + _ input: ListCollaborationIdNamespaceAssociationsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listCollaborationIdNamespaceAssociations, + inputKey: \ListCollaborationIdNamespaceAssociationsInput.nextToken, + outputKey: \ListCollaborationIdNamespaceAssociationsOutput.nextToken, + logger: logger + ) + } + /// Returns an array that summarizes each privacy budget template in a specified collaboration. /// Return PaginatorSequence for operation. /// @@ -982,6 +1222,44 @@ extension CleanRooms { ) } + /// Returns a list of ID mapping tables. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listIdMappingTablesPaginator( + _ input: ListIdMappingTablesInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listIdMappingTables, + inputKey: \ListIdMappingTablesInput.nextToken, + outputKey: \ListIdMappingTablesOutput.nextToken, + logger: logger + ) + } + + /// Returns a list of ID namespace associations. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listIdNamespaceAssociationsPaginator( + _ input: ListIdNamespaceAssociationsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listIdNamespaceAssociations, + inputKey: \ListIdNamespaceAssociationsInput.nextToken, + outputKey: \ListIdNamespaceAssociationsOutput.nextToken, + logger: logger + ) + } + /// Returns detailed information about the privacy budget templates in a specified membership.
/// Return PaginatorSequence for operation. /// @@ -1050,6 +1328,16 @@ extension CleanRooms.ListCollaborationConfiguredAudienceModelAssociationsInput: } } +extension CleanRooms.ListCollaborationIdNamespaceAssociationsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> CleanRooms.ListCollaborationIdNamespaceAssociationsInput { + return .init( + collaborationIdentifier: self.collaborationIdentifier, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension CleanRooms.ListCollaborationPrivacyBudgetTemplatesInput: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> CleanRooms.ListCollaborationPrivacyBudgetTemplatesInput { return .init( @@ -1081,6 +1369,26 @@ extension CleanRooms.ListConfiguredAudienceModelAssociationsInput: AWSPaginateTo } } +extension CleanRooms.ListIdMappingTablesInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> CleanRooms.ListIdMappingTablesInput { + return .init( + maxResults: self.maxResults, + membershipIdentifier: self.membershipIdentifier, + nextToken: token + ) + } +} + +extension CleanRooms.ListIdNamespaceAssociationsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> CleanRooms.ListIdNamespaceAssociationsInput { + return .init( + maxResults: self.maxResults, + membershipIdentifier: self.membershipIdentifier, + nextToken: token + ) + } +} + extension CleanRooms.ListPrivacyBudgetTemplatesInput: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> CleanRooms.ListPrivacyBudgetTemplatesInput { return .init( diff --git a/Sources/Soto/Services/CleanRooms/CleanRooms_shapes.swift b/Sources/Soto/Services/CleanRooms/CleanRooms_shapes.swift index 6bd3302747..4397d2b952 100644 --- a/Sources/Soto/Services/CleanRooms/CleanRooms_shapes.swift +++ b/Sources/Soto/Services/CleanRooms/CleanRooms_shapes.swift @@ -26,6 +26,13 @@ import Foundation extension CleanRooms { // MARK: Enums + public enum AdditionalAnalyses: 
String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case allowed = "ALLOWED" + case notAllowed = "NOT_ALLOWED" + case required = "REQUIRED" + public var description: String { return self.rawValue } + } + public enum AggregateFunctionName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case avg = "AVG" case count = "COUNT" @@ -53,6 +60,7 @@ extension CleanRooms { public enum AnalysisRuleType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case aggregation = "AGGREGATION" case custom = "CUSTOM" + case idMappingTable = "ID_MAPPING_TABLE" case list = "LIST" public var description: String { return self.rawValue } } @@ -69,6 +77,12 @@ extension CleanRooms { public var description: String { return self.rawValue } } + public enum AnalysisType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case additionalAnalysis = "ADDITIONAL_ANALYSIS" + case directAnalysis = "DIRECT_ANALYSIS" + public var description: String { return self.rawValue } + } + public enum CollaborationQueryLogStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" @@ -82,6 +96,13 @@ extension CleanRooms { public var description: String { return self.rawValue } } + public enum ConfiguredTableAssociationAnalysisRuleType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case aggregation = "AGGREGATION" + case custom = "CUSTOM" + case list = "LIST" + public var description: String { return self.rawValue } + } + public enum DifferentialPrivacyAggregationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case avg = "AVG" case count = "COUNT" @@ -97,6 +118,12 @@ extension CleanRooms { public var description: String { return self.rawValue } } + public enum IdNamespaceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case 
source = "SOURCE" + case target = "TARGET" + public var description: String { return self.rawValue } + } + public enum JoinOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case and = "AND" case or = "OR" @@ -227,14 +254,22 @@ extension CleanRooms { } public enum SchemaStatusReasonCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case additionalAnalysesNotAllowed = "ADDITIONAL_ANALYSES_NOT_ALLOWED" + case additionalAnalysesNotConfigured = "ADDITIONAL_ANALYSES_NOT_CONFIGURED" case analysisProvidersNotConfigured = "ANALYSIS_PROVIDERS_NOT_CONFIGURED" case analysisRuleMissing = "ANALYSIS_RULE_MISSING" + case analysisRuleTypesNotCompatible = "ANALYSIS_RULE_TYPES_NOT_COMPATIBLE" case analysisTemplatesNotConfigured = "ANALYSIS_TEMPLATES_NOT_CONFIGURED" + case collaborationAnalysisRuleNotConfigured = "COLLABORATION_ANALYSIS_RULE_NOT_CONFIGURED" case differentialPrivacyPolicyNotConfigured = "DIFFERENTIAL_PRIVACY_POLICY_NOT_CONFIGURED" + case idMappingTableNotPopulated = "ID_MAPPING_TABLE_NOT_POPULATED" + case resultReceiversNotAllowed = "RESULT_RECEIVERS_NOT_ALLOWED" + case resultReceiversNotConfigured = "RESULT_RECEIVERS_NOT_CONFIGURED" public var description: String { return self.rawValue } } public enum SchemaType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case idMappingTable = "ID_MAPPING_TABLE" case table = "TABLE" public var description: String { return self.rawValue } } @@ -249,6 +284,8 @@ extension CleanRooms { case aggregation(AnalysisRuleAggregation) /// Analysis rule type that enables custom SQL queries on a configured table. case custom(AnalysisRuleCustom) + /// The ID mapping table. + case idMappingTable(AnalysisRuleIdMappingTable) /// Analysis rule type that enables only list queries on a configured table. 
case list(AnalysisRuleList) @@ -268,6 +305,9 @@ extension CleanRooms { case .custom: let value = try container.decode(AnalysisRuleCustom.self, forKey: .custom) self = .custom(value) + case .idMappingTable: + let value = try container.decode(AnalysisRuleIdMappingTable.self, forKey: .idMappingTable) + self = .idMappingTable(value) case .list: let value = try container.decode(AnalysisRuleList.self, forKey: .list) self = .list(value) @@ -277,6 +317,7 @@ extension CleanRooms { private enum CodingKeys: String, CodingKey { case aggregation = "aggregation" case custom = "custom" + case idMappingTable = "idMappingTable" case list = "list" } } @@ -340,6 +381,66 @@ extension CleanRooms { } } + public enum ConfiguredTableAssociationAnalysisRulePolicyV1: AWSEncodableShape & AWSDecodableShape, Sendable { + /// Analysis rule type that enables only aggregation queries on a configured table. + case aggregation(ConfiguredTableAssociationAnalysisRuleAggregation) + /// Analysis rule type that enables the table owner to approve custom SQL queries on their configured tables. It supports differential privacy. + case custom(ConfiguredTableAssociationAnalysisRuleCustom) + /// Analysis rule type that enables only list queries on a configured table. 
+ case list(ConfiguredTableAssociationAnalysisRuleList) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .aggregation: + let value = try container.decode(ConfiguredTableAssociationAnalysisRuleAggregation.self, forKey: .aggregation) + self = .aggregation(value) + case .custom: + let value = try container.decode(ConfiguredTableAssociationAnalysisRuleCustom.self, forKey: .custom) + self = .custom(value) + case .list: + let value = try container.decode(ConfiguredTableAssociationAnalysisRuleList.self, forKey: .list) + self = .list(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .aggregation(let value): + try container.encode(value, forKey: .aggregation) + case .custom(let value): + try container.encode(value, forKey: .custom) + case .list(let value): + try container.encode(value, forKey: .list) + } + } + + public func validate(name: String) throws { + switch self { + case .aggregation(let value): + try value.validate(name: "\(name).aggregation") + case .custom(let value): + try value.validate(name: "\(name).custom") + case .list(let value): + try value.validate(name: "\(name).list") + } + } + + private enum CodingKeys: String, CodingKey { + case aggregation = "aggregation" + case custom = "custom" + case list = "list" + } + } + public enum ProtectedQueryOutput: AWSDecodableShape, Sendable { /// The list of member Amazon Web Services account(s) that received the results of the query. 
case memberList([ProtectedQuerySingleMemberOutput]) @@ -371,6 +472,56 @@ extension CleanRooms { } } + public enum ProtectedQueryOutputConfiguration: AWSEncodableShape & AWSDecodableShape, Sendable { + /// Required configuration for a protected query with a member output type. + case member(ProtectedQueryMemberOutputConfiguration) + /// Required configuration for a protected query with an s3 output type. + case s3(ProtectedQueryS3OutputConfiguration) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .member: + let value = try container.decode(ProtectedQueryMemberOutputConfiguration.self, forKey: .member) + self = .member(value) + case .s3: + let value = try container.decode(ProtectedQueryS3OutputConfiguration.self, forKey: .s3) + self = .s3(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .member(let value): + try container.encode(value, forKey: .member) + case .s3(let value): + try container.encode(value, forKey: .s3) + } + } + + public func validate(name: String) throws { + switch self { + case .member(let value): + try value.validate(name: "\(name).member") + case .s3(let value): + try value.validate(name: "\(name).s3") + } + } + + private enum CodingKeys: String, CodingKey { + case member = "member" + case s3 = "s3" + } + } + // MARK: Shapes public struct AggregateColumn: AWSEncodableShape & AWSDecodableShape { @@ -487,6 +638,8 @@ extension CleanRooms { } public struct AnalysisRuleAggregation: AWSEncodableShape & AWSDecodableShape { + /// An indicator as to whether additional analyses (such 
as Clean Rooms ML) can be applied to the output of the direct query. The additionalAnalyses parameter is currently supported for the list analysis rule (AnalysisRuleList) and the custom analysis rule (AnalysisRuleCustom). + public let additionalAnalyses: AdditionalAnalyses? /// The columns that query runners are allowed to use in aggregation queries. public let aggregateColumns: [AggregateColumn] /// Which logical operators (if any) are to be used in an INNER JOIN match condition. Default is AND. @@ -502,7 +655,8 @@ extension CleanRooms { /// Set of scalar functions that are allowed to be used on dimension columns and the output of aggregation of metrics. public let scalarFunctions: [ScalarFunctions] - public init(aggregateColumns: [AggregateColumn], allowedJoinOperators: [JoinOperator]? = nil, dimensionColumns: [String], joinColumns: [String], joinRequired: JoinRequiredOption? = nil, outputConstraints: [AggregationConstraint], scalarFunctions: [ScalarFunctions]) { + public init(additionalAnalyses: AdditionalAnalyses? = nil, aggregateColumns: [AggregateColumn], allowedJoinOperators: [JoinOperator]? = nil, dimensionColumns: [String], joinColumns: [String], joinRequired: JoinRequiredOption? = nil, outputConstraints: [AggregationConstraint], scalarFunctions: [ScalarFunctions]) { + self.additionalAnalyses = additionalAnalyses self.aggregateColumns = aggregateColumns self.allowedJoinOperators = allowedJoinOperators self.dimensionColumns = dimensionColumns @@ -534,6 +688,7 @@ extension CleanRooms { } private enum CodingKeys: String, CodingKey { + case additionalAnalyses = "additionalAnalyses" case aggregateColumns = "aggregateColumns" case allowedJoinOperators = "allowedJoinOperators" case dimensionColumns = "dimensionColumns" @@ -545,17 +700,23 @@ extension CleanRooms { } public struct AnalysisRuleCustom: AWSEncodableShape & AWSDecodableShape { + /// An indicator as to whether additional analyses (such as Clean Rooms ML) can be applied to the output of the direct query. 
+ public let additionalAnalyses: AdditionalAnalyses? /// The ARN of the analysis templates that are allowed by the custom analysis rule. public let allowedAnalyses: [String] /// The IDs of the Amazon Web Services accounts that are allowed to query by the custom analysis rule. Required when allowedAnalyses is ANY_QUERY. public let allowedAnalysisProviders: [String]? /// The differential privacy configuration. public let differentialPrivacy: DifferentialPrivacyConfiguration? + /// A list of columns that aren't allowed to be shown in the query output. + public let disallowedOutputColumns: [String]? - public init(allowedAnalyses: [String], allowedAnalysisProviders: [String]? = nil, differentialPrivacy: DifferentialPrivacyConfiguration? = nil) { + public init(additionalAnalyses: AdditionalAnalyses? = nil, allowedAnalyses: [String], allowedAnalysisProviders: [String]? = nil, differentialPrivacy: DifferentialPrivacyConfiguration? = nil, disallowedOutputColumns: [String]? = nil) { + self.additionalAnalyses = additionalAnalyses self.allowedAnalyses = allowedAnalyses self.allowedAnalysisProviders = allowedAnalysisProviders self.differentialPrivacy = differentialPrivacy + self.disallowedOutputColumns = disallowedOutputColumns } public func validate(name: String) throws { @@ -569,16 +730,46 @@ extension CleanRooms { try validate($0, name: "allowedAnalysisProviders[]", parent: name, pattern: "^\\d+$") } try self.differentialPrivacy?.validate(name: "\(name).differentialPrivacy") + try self.disallowedOutputColumns?.forEach { + try validate($0, name: "disallowedOutputColumns[]", parent: name, max: 127) + try validate($0, name: "disallowedOutputColumns[]", parent: name, min: 1) + try validate($0, name: "disallowedOutputColumns[]", parent: name, pattern: "^[a-z0-9_](([a-z0-9_ ]+-)*([a-z0-9_ ]+))?$") + } } private enum CodingKeys: String, CodingKey { + case additionalAnalyses = "additionalAnalyses" case allowedAnalyses = "allowedAnalyses" case allowedAnalysisProviders = 
"allowedAnalysisProviders" case differentialPrivacy = "differentialPrivacy" + case disallowedOutputColumns = "disallowedOutputColumns" + } + } + + public struct AnalysisRuleIdMappingTable: AWSDecodableShape { + /// The columns that query runners are allowed to select, group by, or filter by. + public let dimensionColumns: [String]? + /// The columns that query runners are allowed to use in an INNER JOIN statement. + public let joinColumns: [String] + /// The query constraints of the analysis rule ID mapping table. + public let queryConstraints: [QueryConstraint] + + public init(dimensionColumns: [String]? = nil, joinColumns: [String], queryConstraints: [QueryConstraint]) { + self.dimensionColumns = dimensionColumns + self.joinColumns = joinColumns + self.queryConstraints = queryConstraints + } + + private enum CodingKeys: String, CodingKey { + case dimensionColumns = "dimensionColumns" + case joinColumns = "joinColumns" + case queryConstraints = "queryConstraints" } } public struct AnalysisRuleList: AWSEncodableShape & AWSDecodableShape { + /// An indicator as to whether additional analyses (such as Clean Rooms ML) can be applied to the output of the direct query. + public let additionalAnalyses: AdditionalAnalyses? /// The logical operators (if any) that are to be used in an INNER JOIN match condition. Default is AND. public let allowedJoinOperators: [JoinOperator]? /// Columns that can be used to join a configured table with the table of the member who can query and other members' configured tables. @@ -586,7 +777,8 @@ extension CleanRooms { /// Columns that can be listed in the output. public let listColumns: [String] - public init(allowedJoinOperators: [JoinOperator]? = nil, joinColumns: [String], listColumns: [String]) { + public init(additionalAnalyses: AdditionalAnalyses? = nil, allowedJoinOperators: [JoinOperator]? 
= nil, joinColumns: [String], listColumns: [String]) { + self.additionalAnalyses = additionalAnalyses self.allowedJoinOperators = allowedJoinOperators self.joinColumns = joinColumns self.listColumns = listColumns @@ -607,6 +799,7 @@ extension CleanRooms { } private enum CodingKeys: String, CodingKey { + case additionalAnalyses = "additionalAnalyses" case allowedJoinOperators = "allowedJoinOperators" case joinColumns = "joinColumns" case listColumns = "listColumns" @@ -1265,6 +1458,115 @@ extension CleanRooms { } } + public struct CollaborationIdNamespaceAssociation: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the collaboration ID namespace association. + public let arn: String + /// The Amazon Resource Name (ARN) of the collaboration that contains the collaboration ID namespace association. + public let collaborationArn: String + /// The unique identifier of the collaboration that contains the collaboration ID namespace association. + public let collaborationId: String + /// The time at which the collaboration ID namespace association was created. + public let createTime: Date + /// The unique identifier of the Amazon Web Services account that created the collaboration ID namespace association. + public let creatorAccountId: String + /// The description of the collaboration ID namespace association. + public let description: String? + /// The unique identifier of the collaboration ID namespace association. + public let id: String + public let idMappingConfig: IdMappingConfig? + /// The input reference configuration that's necessary to create the collaboration ID namespace association. + public let inputReferenceConfig: IdNamespaceAssociationInputReferenceConfig + /// The input reference properties that are needed to create the collaboration ID namespace association. + public let inputReferenceProperties: IdNamespaceAssociationInputReferenceProperties + /// The name of the collaboration ID namespace association. 
+ public let name: String + /// The most recent time at which the collaboration ID namespace was updated. + public let updateTime: Date + + public init(arn: String, collaborationArn: String, collaborationId: String, createTime: Date, creatorAccountId: String, description: String? = nil, id: String, idMappingConfig: IdMappingConfig? = nil, inputReferenceConfig: IdNamespaceAssociationInputReferenceConfig, inputReferenceProperties: IdNamespaceAssociationInputReferenceProperties, name: String, updateTime: Date) { + self.arn = arn + self.collaborationArn = collaborationArn + self.collaborationId = collaborationId + self.createTime = createTime + self.creatorAccountId = creatorAccountId + self.description = description + self.id = id + self.idMappingConfig = idMappingConfig + self.inputReferenceConfig = inputReferenceConfig + self.inputReferenceProperties = inputReferenceProperties + self.name = name + self.updateTime = updateTime + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case collaborationArn = "collaborationArn" + case collaborationId = "collaborationId" + case createTime = "createTime" + case creatorAccountId = "creatorAccountId" + case description = "description" + case id = "id" + case idMappingConfig = "idMappingConfig" + case inputReferenceConfig = "inputReferenceConfig" + case inputReferenceProperties = "inputReferenceProperties" + case name = "name" + case updateTime = "updateTime" + } + } + + public struct CollaborationIdNamespaceAssociationSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the collaboration ID namespace association. + public let arn: String + /// The Amazon Resource Name (ARN) of the collaboration that contains this collaboration ID namespace association. + public let collaborationArn: String + /// The unique identifier of the collaboration that contains this collaboration ID namespace association. 
+ public let collaborationId: String + /// The time at which the collaboration ID namespace association was created. + public let createTime: Date + /// The Amazon Web Services account that created this collaboration ID namespace association. + public let creatorAccountId: String + /// The description of the collaboration ID namespace association. + public let description: String? + /// The unique identifier of the collaboration ID namespace association. + public let id: String + /// The input reference configuration that's used to create the collaboration ID namespace association. + public let inputReferenceConfig: IdNamespaceAssociationInputReferenceConfig + /// The input reference properties that are used to create the collaboration ID namespace association. + public let inputReferenceProperties: IdNamespaceAssociationInputReferencePropertiesSummary + /// The name of the collaboration ID namespace association. + public let name: String + /// The most recent time at which the collaboration ID namespace association was updated. + public let updateTime: Date + + public init(arn: String, collaborationArn: String, collaborationId: String, createTime: Date, creatorAccountId: String, description: String?
= nil, id: String, inputReferenceConfig: IdNamespaceAssociationInputReferenceConfig, inputReferenceProperties: IdNamespaceAssociationInputReferencePropertiesSummary, name: String, updateTime: Date) { + self.arn = arn + self.collaborationArn = collaborationArn + self.collaborationId = collaborationId + self.createTime = createTime + self.creatorAccountId = creatorAccountId + self.description = description + self.id = id + self.inputReferenceConfig = inputReferenceConfig + self.inputReferenceProperties = inputReferenceProperties + self.name = name + self.updateTime = updateTime + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case collaborationArn = "collaborationArn" + case collaborationId = "collaborationId" + case createTime = "createTime" + case creatorAccountId = "creatorAccountId" + case description = "description" + case id = "id" + case inputReferenceConfig = "inputReferenceConfig" + case inputReferenceProperties = "inputReferenceProperties" + case name = "name" + case updateTime = "updateTime" + } + } + public struct CollaborationPrivacyBudgetSummary: AWSDecodableShape { /// The includes epsilon provided and utility in terms of aggregations. public let budget: PrivacyBudget @@ -1663,6 +1965,8 @@ extension CleanRooms { } public struct ConfiguredTableAssociation: AWSDecodableShape { + /// The analysis rule types for the configured table association. + public let analysisRuleTypes: [ConfiguredTableAssociationAnalysisRuleType]? /// The unique ARN for the configured table association. public let arn: String /// The unique ARN for the configured table that the association refers to. @@ -1686,7 +1990,8 @@ extension CleanRooms { /// The time the configured table association was last updated. public let updateTime: Date - public init(arn: String, configuredTableArn: String, configuredTableId: String, createTime: Date, description: String? 
= nil, id: String, membershipArn: String, membershipId: String, name: String, roleArn: String, updateTime: Date) { + public init(analysisRuleTypes: [ConfiguredTableAssociationAnalysisRuleType]? = nil, arn: String, configuredTableArn: String, configuredTableId: String, createTime: Date, description: String? = nil, id: String, membershipArn: String, membershipId: String, name: String, roleArn: String, updateTime: Date) { + self.analysisRuleTypes = analysisRuleTypes self.arn = arn self.configuredTableArn = configuredTableArn self.configuredTableId = configuredTableId @@ -1701,6 +2006,7 @@ extension CleanRooms { } private enum CodingKeys: String, CodingKey { + case analysisRuleTypes = "analysisRuleTypes" case arn = "arn" case configuredTableArn = "configuredTableArn" case configuredTableId = "configuredTableId" @@ -1715,6 +2021,133 @@ extension CleanRooms { } } + public struct ConfiguredTableAssociationAnalysisRule: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the configured table association. + public let configuredTableAssociationArn: String + /// The unique identifier for the configured table association. + public let configuredTableAssociationId: String + /// The creation time of the configured table association analysis rule. + public let createTime: Date + /// The membership identifier for the configured table association analysis rule. + public let membershipIdentifier: String + /// The policy of the configured table association analysis rule. + public let policy: ConfiguredTableAssociationAnalysisRulePolicy + /// The type of the configured table association analysis rule. + public let type: ConfiguredTableAssociationAnalysisRuleType + /// The update time of the configured table association analysis rule. 
+ public let updateTime: Date + + public init(configuredTableAssociationArn: String, configuredTableAssociationId: String, createTime: Date, membershipIdentifier: String, policy: ConfiguredTableAssociationAnalysisRulePolicy, type: ConfiguredTableAssociationAnalysisRuleType, updateTime: Date) { + self.configuredTableAssociationArn = configuredTableAssociationArn + self.configuredTableAssociationId = configuredTableAssociationId + self.createTime = createTime + self.membershipIdentifier = membershipIdentifier + self.policy = policy + self.type = type + self.updateTime = updateTime + } + + private enum CodingKeys: String, CodingKey { + case configuredTableAssociationArn = "configuredTableAssociationArn" + case configuredTableAssociationId = "configuredTableAssociationId" + case createTime = "createTime" + case membershipIdentifier = "membershipIdentifier" + case policy = "policy" + case type = "type" + case updateTime = "updateTime" + } + } + + public struct ConfiguredTableAssociationAnalysisRuleAggregation: AWSEncodableShape & AWSDecodableShape { + /// The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output. The allowedAdditionalAnalyses parameter is currently supported for the list analysis rule (AnalysisRuleList) and the custom analysis rule (AnalysisRuleCustom). + public let allowedAdditionalAnalyses: [String]? + /// The list of collaboration members who are allowed to receive results of queries run with this configured table. + public let allowedResultReceivers: [String]? + + public init(allowedAdditionalAnalyses: [String]? = nil, allowedResultReceivers: [String]? 
= nil) { + self.allowedAdditionalAnalyses = allowedAdditionalAnalyses + self.allowedResultReceivers = allowedResultReceivers + } + + public func validate(name: String) throws { + try self.allowedAdditionalAnalyses?.forEach { + try validate($0, name: "allowedAdditionalAnalyses[]", parent: name, max: 256) + try validate($0, name: "allowedAdditionalAnalyses[]", parent: name, pattern: "^arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:([\\d]{12}|\\*):membership/[\\*\\d\\w-]+/configuredaudiencemodelassociation/[\\*\\d\\w-]+$") + } + try self.validate(self.allowedAdditionalAnalyses, name: "allowedAdditionalAnalyses", parent: name, max: 25) + try self.allowedResultReceivers?.forEach { + try validate($0, name: "allowedResultReceivers[]", parent: name, max: 12) + try validate($0, name: "allowedResultReceivers[]", parent: name, min: 12) + try validate($0, name: "allowedResultReceivers[]", parent: name, pattern: "^\\d+$") + } + } + + private enum CodingKeys: String, CodingKey { + case allowedAdditionalAnalyses = "allowedAdditionalAnalyses" + case allowedResultReceivers = "allowedResultReceivers" + } + } + + public struct ConfiguredTableAssociationAnalysisRuleCustom: AWSEncodableShape & AWSDecodableShape { + /// The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output. + public let allowedAdditionalAnalyses: [String]? + /// The list of collaboration members who are allowed to receive results of queries run with this configured table. + public let allowedResultReceivers: [String]? + + public init(allowedAdditionalAnalyses: [String]? = nil, allowedResultReceivers: [String]? 
= nil) { + self.allowedAdditionalAnalyses = allowedAdditionalAnalyses + self.allowedResultReceivers = allowedResultReceivers + } + + public func validate(name: String) throws { + try self.allowedAdditionalAnalyses?.forEach { + try validate($0, name: "allowedAdditionalAnalyses[]", parent: name, max: 256) + try validate($0, name: "allowedAdditionalAnalyses[]", parent: name, pattern: "^arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:([\\d]{12}|\\*):membership/[\\*\\d\\w-]+/configuredaudiencemodelassociation/[\\*\\d\\w-]+$") + } + try self.validate(self.allowedAdditionalAnalyses, name: "allowedAdditionalAnalyses", parent: name, max: 25) + try self.allowedResultReceivers?.forEach { + try validate($0, name: "allowedResultReceivers[]", parent: name, max: 12) + try validate($0, name: "allowedResultReceivers[]", parent: name, min: 12) + try validate($0, name: "allowedResultReceivers[]", parent: name, pattern: "^\\d+$") + } + } + + private enum CodingKeys: String, CodingKey { + case allowedAdditionalAnalyses = "allowedAdditionalAnalyses" + case allowedResultReceivers = "allowedResultReceivers" + } + } + + public struct ConfiguredTableAssociationAnalysisRuleList: AWSEncodableShape & AWSDecodableShape { + /// The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output. + public let allowedAdditionalAnalyses: [String]? + /// The list of collaboration members who are allowed to receive results of queries run with this configured table. + public let allowedResultReceivers: [String]? + + public init(allowedAdditionalAnalyses: [String]? = nil, allowedResultReceivers: [String]? 
= nil) { + self.allowedAdditionalAnalyses = allowedAdditionalAnalyses + self.allowedResultReceivers = allowedResultReceivers + } + + public func validate(name: String) throws { + try self.allowedAdditionalAnalyses?.forEach { + try validate($0, name: "allowedAdditionalAnalyses[]", parent: name, max: 256) + try validate($0, name: "allowedAdditionalAnalyses[]", parent: name, pattern: "^arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:([\\d]{12}|\\*):membership/[\\*\\d\\w-]+/configuredaudiencemodelassociation/[\\*\\d\\w-]+$") + } + try self.validate(self.allowedAdditionalAnalyses, name: "allowedAdditionalAnalyses", parent: name, max: 25) + try self.allowedResultReceivers?.forEach { + try validate($0, name: "allowedResultReceivers[]", parent: name, max: 12) + try validate($0, name: "allowedResultReceivers[]", parent: name, min: 12) + try validate($0, name: "allowedResultReceivers[]", parent: name, pattern: "^\\d+$") + } + } + + private enum CodingKeys: String, CodingKey { + case allowedAdditionalAnalyses = "allowedAdditionalAnalyses" + case allowedResultReceivers = "allowedResultReceivers" + } + } + public struct ConfiguredTableAssociationSummary: AWSDecodableShape { /// The unique ARN for the configured table association. public let arn: String @@ -1943,7 +2376,7 @@ extension CleanRooms { } public struct CreateCollaborationOutput: AWSDecodableShape { - /// The entire created collaboration object. + /// The collaboration. public let collaboration: Collaboration public init(collaboration: Collaboration) { @@ -2032,7 +2465,7 @@ extension CleanRooms { } public struct CreateConfiguredTableAnalysisRuleInput: AWSEncodableShape { - /// The entire created configured table analysis rule object. + /// The analysis rule policy that was created for the configured table. public let analysisRulePolicy: ConfiguredTableAnalysisRulePolicy /// The type of analysis rule. 
public let analysisRuleType: ConfiguredTableAnalysisRuleType @@ -2067,7 +2500,7 @@ extension CleanRooms { } public struct CreateConfiguredTableAnalysisRuleOutput: AWSDecodableShape { - /// The entire created analysis rule. + /// The analysis rule that was created for the configured table. public let analysisRule: ConfiguredTableAnalysisRule public init(analysisRule: ConfiguredTableAnalysisRule) { @@ -2079,25 +2512,80 @@ extension CleanRooms { } } - public struct CreateConfiguredTableAssociationInput: AWSEncodableShape { - /// A unique identifier for the configured table to be associated to. Currently accepts a configured table ID. - public let configuredTableIdentifier: String - /// A description for the configured table association. - public let description: String? - /// A unique identifier for one of your memberships for a collaboration. The configured table is associated to the collaboration that this membership belongs to. Currently accepts a membership ID. + public struct CreateConfiguredTableAssociationAnalysisRuleInput: AWSEncodableShape { + /// The analysis rule policy that was created for the configured table association. + public let analysisRulePolicy: ConfiguredTableAssociationAnalysisRulePolicy + /// The type of analysis rule. + public let analysisRuleType: ConfiguredTableAssociationAnalysisRuleType + /// The unique ID for the configured table association. Currently accepts the configured table association ID. + public let configuredTableAssociationIdentifier: String + /// A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID. public let membershipIdentifier: String - /// The name of the configured table association. This name is used to query the underlying configured table. - public let name: String - /// The service will assume this role to access catalog metadata and query the table. 
- public let roleArn: String - /// An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource. - public let tags: [String: String]? - public init(configuredTableIdentifier: String, description: String? = nil, membershipIdentifier: String, name: String, roleArn: String, tags: [String: String]? = nil) { - self.configuredTableIdentifier = configuredTableIdentifier - self.description = description + public init(analysisRulePolicy: ConfiguredTableAssociationAnalysisRulePolicy, analysisRuleType: ConfiguredTableAssociationAnalysisRuleType, configuredTableAssociationIdentifier: String, membershipIdentifier: String) { + self.analysisRulePolicy = analysisRulePolicy + self.analysisRuleType = analysisRuleType + self.configuredTableAssociationIdentifier = configuredTableAssociationIdentifier self.membershipIdentifier = membershipIdentifier - self.name = name + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.analysisRulePolicy, forKey: .analysisRulePolicy) + try container.encode(self.analysisRuleType, forKey: .analysisRuleType) + request.encodePath(self.configuredTableAssociationIdentifier, key: "configuredTableAssociationIdentifier") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + } + + public func validate(name: String) throws { + try self.analysisRulePolicy.validate(name: "\(name).analysisRulePolicy") + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, max: 36) + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, min: 36) + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case analysisRulePolicy = "analysisRulePolicy" + case analysisRuleType = "analysisRuleType" + } + } + + public struct CreateConfiguredTableAssociationAnalysisRuleOutput: AWSDecodableShape { + /// The analysis rule for the configured table association. In the console, the ConfiguredTableAssociationAnalysisRule is referred to as the collaboration analysis rule. 
+ public let analysisRule: ConfiguredTableAssociationAnalysisRule + + public init(analysisRule: ConfiguredTableAssociationAnalysisRule) { + self.analysisRule = analysisRule + } + + private enum CodingKeys: String, CodingKey { + case analysisRule = "analysisRule" + } + } + + public struct CreateConfiguredTableAssociationInput: AWSEncodableShape { + /// A unique identifier for the configured table to be associated to. Currently accepts a configured table ID. + public let configuredTableIdentifier: String + /// A description for the configured table association. + public let description: String? + /// A unique identifier for one of your memberships for a collaboration. The configured table is associated to the collaboration that this membership belongs to. Currently accepts a membership ID. + public let membershipIdentifier: String + /// The name of the configured table association. This name is used to query the underlying configured table. + public let name: String + /// The service will assume this role to access catalog metadata and query the table. + public let roleArn: String + /// An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource. + public let tags: [String: String]? + + public init(configuredTableIdentifier: String, description: String? = nil, membershipIdentifier: String, name: String, roleArn: String, tags: [String: String]? = nil) { + self.configuredTableIdentifier = configuredTableIdentifier + self.description = description + self.membershipIdentifier = membershipIdentifier + self.name = name self.roleArn = roleArn self.tags = tags } @@ -2145,7 +2633,7 @@ extension CleanRooms { } public struct CreateConfiguredTableAssociationOutput: AWSDecodableShape { - /// The entire configured table association object. + /// The configured table association. 
public let configuredTableAssociation: ConfiguredTableAssociation public init(configuredTableAssociation: ConfiguredTableAssociation) { @@ -2224,6 +2712,156 @@ extension CleanRooms { } } + public struct CreateIdMappingTableInput: AWSEncodableShape { + /// A description of the ID mapping table. + public let description: String? + /// The input reference configuration needed to create the ID mapping table. + public let inputReferenceConfig: IdMappingTableInputReferenceConfig + /// The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. This value is used to encrypt the mapping table data that is stored by Clean Rooms. + public let kmsKeyArn: String? + /// The unique identifier of the membership that contains the ID mapping table. + public let membershipIdentifier: String + /// A name for the ID mapping table. + public let name: String + /// An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource. + public let tags: [String: String]? + + public init(description: String? = nil, inputReferenceConfig: IdMappingTableInputReferenceConfig, kmsKeyArn: String? = nil, membershipIdentifier: String, name: String, tags: [String: String]? = nil) { + self.description = description + self.inputReferenceConfig = inputReferenceConfig + self.kmsKeyArn = kmsKeyArn + self.membershipIdentifier = membershipIdentifier + self.name = name + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encode(self.inputReferenceConfig, forKey: .inputReferenceConfig) + try container.encodeIfPresent(self.kmsKeyArn, forKey: .kmsKeyArn) + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + try container.encode(self.name, forKey: .name) + try container.encodeIfPresent(self.tags, forKey: .tags) + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 255) + try self.validate(self.description, name: "description", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t\\r\\n]*$") + try self.inputReferenceConfig.validate(name: "\(name).inputReferenceConfig") + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, max: 2048) + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, min: 20) + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, pattern: "^arn:aws:kms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:key/[a-zA-Z0-9-]+$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.name, name: "name", parent: name, max: 128) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9_](([a-zA-Z0-9_ ]+-)*([a-zA-Z0-9_ ]+))?$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, 
name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case inputReferenceConfig = "inputReferenceConfig" + case kmsKeyArn = "kmsKeyArn" + case name = "name" + case tags = "tags" + } + } + + public struct CreateIdMappingTableOutput: AWSDecodableShape { + /// The ID mapping table that was created. + public let idMappingTable: IdMappingTable + + public init(idMappingTable: IdMappingTable) { + self.idMappingTable = idMappingTable + } + + private enum CodingKeys: String, CodingKey { + case idMappingTable = "idMappingTable" + } + } + + public struct CreateIdNamespaceAssociationInput: AWSEncodableShape { + /// The description of the ID namespace association. + public let description: String? + /// The configuration settings for the ID mapping table. + public let idMappingConfig: IdMappingConfig? + /// The input reference configuration needed to create the ID namespace association. + public let inputReferenceConfig: IdNamespaceAssociationInputReferenceConfig + /// The unique identifier of the membership that contains the ID namespace association. + public let membershipIdentifier: String + /// The name for the ID namespace association. + public let name: String + /// An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource. + public let tags: [String: String]? + + public init(description: String? = nil, idMappingConfig: IdMappingConfig? = nil, inputReferenceConfig: IdNamespaceAssociationInputReferenceConfig, membershipIdentifier: String, name: String, tags: [String: String]? 
= nil) { + self.description = description + self.idMappingConfig = idMappingConfig + self.inputReferenceConfig = inputReferenceConfig + self.membershipIdentifier = membershipIdentifier + self.name = name + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encodeIfPresent(self.idMappingConfig, forKey: .idMappingConfig) + try container.encode(self.inputReferenceConfig, forKey: .inputReferenceConfig) + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + try container.encode(self.name, forKey: .name) + try container.encodeIfPresent(self.tags, forKey: .tags) + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 255) + try self.validate(self.description, name: "description", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t\\r\\n]*$") + try self.inputReferenceConfig.validate(name: "\(name).inputReferenceConfig") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.name, name: "name", parent: name, max: 100) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^(?!\\s*$)[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t]*$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: 
name, min: 1) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case idMappingConfig = "idMappingConfig" + case inputReferenceConfig = "inputReferenceConfig" + case name = "name" + case tags = "tags" + } + } + + public struct CreateIdNamespaceAssociationOutput: AWSDecodableShape { + /// The ID namespace association that was created. + public let idNamespaceAssociation: IdNamespaceAssociation + + public init(idNamespaceAssociation: IdNamespaceAssociation) { + self.idNamespaceAssociation = idNamespaceAssociation + } + + private enum CodingKeys: String, CodingKey { + case idNamespaceAssociation = "idNamespaceAssociation" + } + } + public struct CreateMembershipInput: AWSEncodableShape { /// The unique ID for the associated collaboration. public let collaborationIdentifier: String @@ -2494,6 +3132,44 @@ extension CleanRooms { public init() {} } + public struct DeleteConfiguredTableAssociationAnalysisRuleInput: AWSEncodableShape { + /// The type of the analysis rule that you want to delete. + public let analysisRuleType: ConfiguredTableAssociationAnalysisRuleType + /// The identifier for the configured table association that's related to the analysis rule that you want to delete. + public let configuredTableAssociationIdentifier: String + /// A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID. 
+ public let membershipIdentifier: String + + public init(analysisRuleType: ConfiguredTableAssociationAnalysisRuleType, configuredTableAssociationIdentifier: String, membershipIdentifier: String) { + self.analysisRuleType = analysisRuleType + self.configuredTableAssociationIdentifier = configuredTableAssociationIdentifier + self.membershipIdentifier = membershipIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.analysisRuleType, key: "analysisRuleType") + request.encodePath(self.configuredTableAssociationIdentifier, key: "configuredTableAssociationIdentifier") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, max: 36) + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, min: 36) + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteConfiguredTableAssociationAnalysisRuleOutput: AWSDecodableShape { + public init() {} + } + public struct DeleteConfiguredTableAssociationInput: AWSEncodableShape { /// The unique ID for the configured table association to be 
deleted. Currently accepts the configured table ID. public let configuredTableAssociationIdentifier: String @@ -2555,6 +3231,74 @@ extension CleanRooms { public init() {} } + public struct DeleteIdMappingTableInput: AWSEncodableShape { + /// The unique identifier of the ID mapping table that you want to delete. + public let idMappingTableIdentifier: String + /// The unique identifier of the membership that contains the ID mapping table that you want to delete. + public let membershipIdentifier: String + + public init(idMappingTableIdentifier: String, membershipIdentifier: String) { + self.idMappingTableIdentifier = idMappingTableIdentifier + self.membershipIdentifier = membershipIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.idMappingTableIdentifier, key: "idMappingTableIdentifier") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, max: 36) + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, min: 36) + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteIdMappingTableOutput: AWSDecodableShape { + public init() {} + } + + 
public struct DeleteIdNamespaceAssociationInput: AWSEncodableShape { + /// The unique identifier of the ID namespace association that you want to delete. + public let idNamespaceAssociationIdentifier: String + /// The unique identifier of the membership that contains the ID namespace association that you want to delete. + public let membershipIdentifier: String + + public init(idNamespaceAssociationIdentifier: String, membershipIdentifier: String) { + self.idNamespaceAssociationIdentifier = idNamespaceAssociationIdentifier + self.membershipIdentifier = membershipIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.idNamespaceAssociationIdentifier, key: "idNamespaceAssociationIdentifier") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, max: 36) + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, min: 36) + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteIdNamespaceAssociationOutput: AWSDecodableShape { + public init() {} + } + public struct DeleteMemberInput: AWSEncodableShape { 
/// The account ID of the member to remove. public let accountId: String @@ -2880,6 +3624,19 @@ extension CleanRooms { } } + public struct DirectAnalysisConfigurationDetails: AWSDecodableShape { + /// The account IDs for the member who received the results of a protected query. + public let receiverAccountIds: [String]? + + public init(receiverAccountIds: [String]? = nil) { + self.receiverAccountIds = receiverAccountIds + } + + private enum CodingKeys: String, CodingKey { + case receiverAccountIds = "receiverAccountIds" + } + } + public struct GetAnalysisTemplateInput: AWSEncodableShape { /// The identifier for the analysis template resource. public let analysisTemplateIdentifier: String @@ -3008,44 +3765,87 @@ extension CleanRooms { } } - public struct GetCollaborationInput: AWSEncodableShape { - /// The identifier for the collaboration. + public struct GetCollaborationIdNamespaceAssociationInput: AWSEncodableShape { + /// The unique identifier of the collaboration that contains the ID namespace association that you want to retrieve. public let collaborationIdentifier: String + /// The unique identifier of the ID namespace association that you want to retrieve. + public let idNamespaceAssociationIdentifier: String - public init(collaborationIdentifier: String) { + public init(collaborationIdentifier: String, idNamespaceAssociationIdentifier: String) { self.collaborationIdentifier = collaborationIdentifier + self.idNamespaceAssociationIdentifier = idNamespaceAssociationIdentifier } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.collaborationIdentifier, key: "collaborationIdentifier") + request.encodePath(self.idNamespaceAssociationIdentifier, key: "idNamespaceAssociationIdentifier") } public func validate(name: String) throws { try self.validate(self.collaborationIdentifier, name: "collaborationIdentifier", parent: name, max: 36) try self.validate(self.collaborationIdentifier, name: "collaborationIdentifier", parent: name, min: 36) try self.validate(self.collaborationIdentifier, name: "collaborationIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, max: 36) + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, min: 36) + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") } private enum CodingKeys: CodingKey {} } - public struct GetCollaborationOutput: AWSDecodableShape { - /// The entire collaboration for this identifier. - public let collaboration: Collaboration + public struct GetCollaborationIdNamespaceAssociationOutput: AWSDecodableShape { + /// The ID namespace association that you requested. 
+ public let collaborationIdNamespaceAssociation: CollaborationIdNamespaceAssociation - public init(collaboration: Collaboration) { - self.collaboration = collaboration + public init(collaborationIdNamespaceAssociation: CollaborationIdNamespaceAssociation) { + self.collaborationIdNamespaceAssociation = collaborationIdNamespaceAssociation } private enum CodingKeys: String, CodingKey { - case collaboration = "collaboration" + case collaborationIdNamespaceAssociation = "collaborationIdNamespaceAssociation" } } - public struct GetCollaborationPrivacyBudgetTemplateInput: AWSEncodableShape { - /// A unique identifier for one of your collaborations. + public struct GetCollaborationInput: AWSEncodableShape { + /// The identifier for the collaboration. + public let collaborationIdentifier: String + + public init(collaborationIdentifier: String) { + self.collaborationIdentifier = collaborationIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.collaborationIdentifier, key: "collaborationIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.collaborationIdentifier, name: "collaborationIdentifier", parent: name, max: 36) + try self.validate(self.collaborationIdentifier, name: "collaborationIdentifier", parent: name, min: 36) + try self.validate(self.collaborationIdentifier, name: "collaborationIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetCollaborationOutput: AWSDecodableShape { + /// The entire collaboration for this identifier. 
+ public let collaboration: Collaboration + + public init(collaboration: Collaboration) { + self.collaboration = collaboration + } + + private enum CodingKeys: String, CodingKey { + case collaboration = "collaboration" + } + } + + public struct GetCollaborationPrivacyBudgetTemplateInput: AWSEncodableShape { + /// A unique identifier for one of your collaborations. public let collaborationIdentifier: String /// A unique identifier for one of your privacy budget templates. public let privacyBudgetTemplateIdentifier: String @@ -3170,6 +3970,53 @@ extension CleanRooms { } } + public struct GetConfiguredTableAssociationAnalysisRuleInput: AWSEncodableShape { + /// The type of analysis rule that you want to retrieve. + public let analysisRuleType: ConfiguredTableAssociationAnalysisRuleType + /// The identifier for the configured table association that's related to the analysis rule. + public let configuredTableAssociationIdentifier: String + /// A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID. + public let membershipIdentifier: String + + public init(analysisRuleType: ConfiguredTableAssociationAnalysisRuleType, configuredTableAssociationIdentifier: String, membershipIdentifier: String) { + self.analysisRuleType = analysisRuleType + self.configuredTableAssociationIdentifier = configuredTableAssociationIdentifier + self.membershipIdentifier = membershipIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.analysisRuleType, key: "analysisRuleType") + request.encodePath(self.configuredTableAssociationIdentifier, key: "configuredTableAssociationIdentifier") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, max: 36) + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, min: 36) + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetConfiguredTableAssociationAnalysisRuleOutput: AWSDecodableShape { + /// The analysis rule for the configured table association. In the console, the ConfiguredTableAssociationAnalysisRule is referred to as the collaboration analysis rule. + public let analysisRule: ConfiguredTableAssociationAnalysisRule + + public init(analysisRule: ConfiguredTableAssociationAnalysisRule) { + self.analysisRule = analysisRule + } + + private enum CodingKeys: String, CodingKey { + case analysisRule = "analysisRule" + } + } + public struct GetConfiguredTableAssociationInput: AWSEncodableShape { /// The unique ID for the configured table association to retrieve. Currently accepts the configured table ID. 
public let configuredTableAssociationIdentifier: String @@ -3249,6 +4096,92 @@ extension CleanRooms { } } + public struct GetIdMappingTableInput: AWSEncodableShape { + /// The unique identifier of the ID mapping table identifier that you want to retrieve. + public let idMappingTableIdentifier: String + /// The unique identifier of the membership that contains the ID mapping table that you want to retrieve. + public let membershipIdentifier: String + + public init(idMappingTableIdentifier: String, membershipIdentifier: String) { + self.idMappingTableIdentifier = idMappingTableIdentifier + self.membershipIdentifier = membershipIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.idMappingTableIdentifier, key: "idMappingTableIdentifier") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, max: 36) + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, min: 36) + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetIdMappingTableOutput: AWSDecodableShape { + /// The ID mapping table that you requested. 
+ public let idMappingTable: IdMappingTable + + public init(idMappingTable: IdMappingTable) { + self.idMappingTable = idMappingTable + } + + private enum CodingKeys: String, CodingKey { + case idMappingTable = "idMappingTable" + } + } + + public struct GetIdNamespaceAssociationInput: AWSEncodableShape { + /// The unique identifier of the ID namespace association that you want to retrieve. + public let idNamespaceAssociationIdentifier: String + /// The unique identifier of the membership that contains the ID namespace association that you want to retrieve. + public let membershipIdentifier: String + + public init(idNamespaceAssociationIdentifier: String, membershipIdentifier: String) { + self.idNamespaceAssociationIdentifier = idNamespaceAssociationIdentifier + self.membershipIdentifier = membershipIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.idNamespaceAssociationIdentifier, key: "idNamespaceAssociationIdentifier") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, max: 36) + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, min: 36) + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: 
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetIdNamespaceAssociationOutput: AWSDecodableShape { + /// The ID namespace association that you requested. + public let idNamespaceAssociation: IdNamespaceAssociation + + public init(idNamespaceAssociation: IdNamespaceAssociation) { + self.idNamespaceAssociation = idNamespaceAssociation + } + + private enum CodingKeys: String, CodingKey { + case idNamespaceAssociation = "idNamespaceAssociation" + } + } + public struct GetMembershipInput: AWSEncodableShape { /// The identifier for a membership resource. public let membershipIdentifier: String @@ -3483,6 +4416,369 @@ extension CleanRooms { } } + public struct IdMappingConfig: AWSEncodableShape & AWSDecodableShape { + /// An indicator as to whether you can use your column as a dimension column in the ID mapping table (TRUE) or not (FALSE). Default is FALSE. + public let allowUseAsDimensionColumn: Bool + + public init(allowUseAsDimensionColumn: Bool) { + self.allowUseAsDimensionColumn = allowUseAsDimensionColumn + } + + private enum CodingKeys: String, CodingKey { + case allowUseAsDimensionColumn = "allowUseAsDimensionColumn" + } + } + + public struct IdMappingTable: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the ID mapping table. + public let arn: String + /// The Amazon Resource Name (ARN) of the collaboration that contains this ID mapping table. + public let collaborationArn: String + /// The unique identifier of the collaboration that contains this ID mapping table. + public let collaborationId: String + /// The time at which the ID mapping table was created. + public let createTime: Date + /// The description of the ID mapping table. + public let description: String? + /// The unique identifier of the ID mapping table. + public let id: String + /// The input reference configuration for the ID mapping table. 
+ public let inputReferenceConfig: IdMappingTableInputReferenceConfig + /// The input reference properties for the ID mapping table. + public let inputReferenceProperties: IdMappingTableInputReferenceProperties + /// The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. + public let kmsKeyArn: String? + /// The Amazon Resource Name (ARN) of the membership resource for the ID mapping table. + public let membershipArn: String + /// The unique identifier of the membership resource for the ID mapping table. + public let membershipId: String + /// The name of the ID mapping table. + public let name: String + /// The most recent time at which the ID mapping table was updated. + public let updateTime: Date + + public init(arn: String, collaborationArn: String, collaborationId: String, createTime: Date, description: String? = nil, id: String, inputReferenceConfig: IdMappingTableInputReferenceConfig, inputReferenceProperties: IdMappingTableInputReferenceProperties, kmsKeyArn: String? 
= nil, membershipArn: String, membershipId: String, name: String, updateTime: Date) { + self.arn = arn + self.collaborationArn = collaborationArn + self.collaborationId = collaborationId + self.createTime = createTime + self.description = description + self.id = id + self.inputReferenceConfig = inputReferenceConfig + self.inputReferenceProperties = inputReferenceProperties + self.kmsKeyArn = kmsKeyArn + self.membershipArn = membershipArn + self.membershipId = membershipId + self.name = name + self.updateTime = updateTime + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case collaborationArn = "collaborationArn" + case collaborationId = "collaborationId" + case createTime = "createTime" + case description = "description" + case id = "id" + case inputReferenceConfig = "inputReferenceConfig" + case inputReferenceProperties = "inputReferenceProperties" + case kmsKeyArn = "kmsKeyArn" + case membershipArn = "membershipArn" + case membershipId = "membershipId" + case name = "name" + case updateTime = "updateTime" + } + } + + public struct IdMappingTableInputReferenceConfig: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the referenced resource in Entity Resolution. Valid values are ID mapping workflow ARNs. + public let inputReferenceArn: String + /// When TRUE, Clean Rooms manages permissions for the ID mapping table resource. When FALSE, the resource owner manages permissions for the ID mapping table resource. 
+ public let manageResourcePolicies: Bool + + public init(inputReferenceArn: String, manageResourcePolicies: Bool) { + self.inputReferenceArn = inputReferenceArn + self.manageResourcePolicies = manageResourcePolicies + } + + public func validate(name: String) throws { + try self.validate(self.inputReferenceArn, name: "inputReferenceArn", parent: name, max: 2048) + try self.validate(self.inputReferenceArn, name: "inputReferenceArn", parent: name, min: 20) + try self.validate(self.inputReferenceArn, name: "inputReferenceArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):entityresolution:.*:[0-9]+:(idmappingworkflow/.*)$") + } + + private enum CodingKeys: String, CodingKey { + case inputReferenceArn = "inputReferenceArn" + case manageResourcePolicies = "manageResourcePolicies" + } + } + + public struct IdMappingTableInputReferenceProperties: AWSDecodableShape { + /// The input source of the ID mapping table. + public let idMappingTableInputSource: [IdMappingTableInputSource] + + public init(idMappingTableInputSource: [IdMappingTableInputSource]) { + self.idMappingTableInputSource = idMappingTableInputSource + } + + private enum CodingKeys: String, CodingKey { + case idMappingTableInputSource = "idMappingTableInputSource" + } + } + + public struct IdMappingTableInputSource: AWSDecodableShape { + /// The unique identifier of the ID namespace association. + public let idNamespaceAssociationId: String + /// The type of the input source of the ID mapping table. + public let type: IdNamespaceType + + public init(idNamespaceAssociationId: String, type: IdNamespaceType) { + self.idNamespaceAssociationId = idNamespaceAssociationId + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case idNamespaceAssociationId = "idNamespaceAssociationId" + case type = "type" + } + } + + public struct IdMappingTableSchemaTypeProperties: AWSDecodableShape { + /// Defines which ID namespace associations are used to create the ID mapping table. 
+ public let idMappingTableInputSource: [IdMappingTableInputSource] + + public init(idMappingTableInputSource: [IdMappingTableInputSource]) { + self.idMappingTableInputSource = idMappingTableInputSource + } + + private enum CodingKeys: String, CodingKey { + case idMappingTableInputSource = "idMappingTableInputSource" + } + } + + public struct IdMappingTableSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of this ID mapping table. + public let arn: String + /// The Amazon Resource Name (ARN) of the collaboration that contains this ID mapping table. + public let collaborationArn: String + /// The unique identifier of the collaboration that contains this ID mapping table. + public let collaborationId: String + /// The time at which this ID mapping table was created. + public let createTime: Date + /// The description of this ID mapping table. + public let description: String? + /// The unique identifier of this ID mapping table. + public let id: String + /// The input reference configuration for the ID mapping table. + public let inputReferenceConfig: IdMappingTableInputReferenceConfig + /// The Amazon Resource Name (ARN) of the membership resource for this ID mapping table. + public let membershipArn: String + /// The unique identifier of the membership resource for this ID mapping table. + public let membershipId: String + /// The name of this ID mapping table. + public let name: String + /// The most recent time at which this ID mapping table was updated. + public let updateTime: Date + + public init(arn: String, collaborationArn: String, collaborationId: String, createTime: Date, description: String? 
= nil, id: String, inputReferenceConfig: IdMappingTableInputReferenceConfig, membershipArn: String, membershipId: String, name: String, updateTime: Date) { + self.arn = arn + self.collaborationArn = collaborationArn + self.collaborationId = collaborationId + self.createTime = createTime + self.description = description + self.id = id + self.inputReferenceConfig = inputReferenceConfig + self.membershipArn = membershipArn + self.membershipId = membershipId + self.name = name + self.updateTime = updateTime + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case collaborationArn = "collaborationArn" + case collaborationId = "collaborationId" + case createTime = "createTime" + case description = "description" + case id = "id" + case inputReferenceConfig = "inputReferenceConfig" + case membershipArn = "membershipArn" + case membershipId = "membershipId" + case name = "name" + case updateTime = "updateTime" + } + } + + public struct IdNamespaceAssociation: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the ID namespace association. + public let arn: String + /// The Amazon Resource Name (ARN) of the collaboration that contains this ID namespace association. + public let collaborationArn: String + /// The unique identifier of the collaboration that contains this ID namespace association. + public let collaborationId: String + /// The time at which the ID namespace association was created. + public let createTime: Date + /// The description of the ID namespace association. + public let description: String? + /// The unique identifier for this ID namespace association. + public let id: String + /// The configuration settings for the ID mapping table. + public let idMappingConfig: IdMappingConfig? + /// The input reference configuration for the ID namespace association. + public let inputReferenceConfig: IdNamespaceAssociationInputReferenceConfig + /// The input reference properties for the ID namespace association. 
+ public let inputReferenceProperties: IdNamespaceAssociationInputReferenceProperties + /// The Amazon Resource Name (ARN) of the membership resource for this ID namespace association. + public let membershipArn: String + /// The unique identifier of the membership resource for this ID namespace association. + public let membershipId: String + /// The name of this ID namespace association. + public let name: String + /// The most recent time at which the ID namespace association was updated. + public let updateTime: Date + + public init(arn: String, collaborationArn: String, collaborationId: String, createTime: Date, description: String? = nil, id: String, idMappingConfig: IdMappingConfig? = nil, inputReferenceConfig: IdNamespaceAssociationInputReferenceConfig, inputReferenceProperties: IdNamespaceAssociationInputReferenceProperties, membershipArn: String, membershipId: String, name: String, updateTime: Date) { + self.arn = arn + self.collaborationArn = collaborationArn + self.collaborationId = collaborationId + self.createTime = createTime + self.description = description + self.id = id + self.idMappingConfig = idMappingConfig + self.inputReferenceConfig = inputReferenceConfig + self.inputReferenceProperties = inputReferenceProperties + self.membershipArn = membershipArn + self.membershipId = membershipId + self.name = name + self.updateTime = updateTime + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case collaborationArn = "collaborationArn" + case collaborationId = "collaborationId" + case createTime = "createTime" + case description = "description" + case id = "id" + case idMappingConfig = "idMappingConfig" + case inputReferenceConfig = "inputReferenceConfig" + case inputReferenceProperties = "inputReferenceProperties" + case membershipArn = "membershipArn" + case membershipId = "membershipId" + case name = "name" + case updateTime = "updateTime" + } + } + + public struct IdNamespaceAssociationInputReferenceConfig: AWSEncodableShape & 
AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Entity Resolution resource that is being associated to the collaboration. Valid resource ARNs are from the ID namespaces that you own. + public let inputReferenceArn: String + /// When TRUE, Clean Rooms manages permissions for the ID namespace association resource. When FALSE, the resource owner manages permissions for the ID namespace association resource. + public let manageResourcePolicies: Bool + + public init(inputReferenceArn: String, manageResourcePolicies: Bool) { + self.inputReferenceArn = inputReferenceArn + self.manageResourcePolicies = manageResourcePolicies + } + + public func validate(name: String) throws { + try self.validate(self.inputReferenceArn, name: "inputReferenceArn", parent: name, max: 256) + try self.validate(self.inputReferenceArn, name: "inputReferenceArn", parent: name, pattern: "^arn:aws:entityresolution:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:idnamespace/[\\d\\w-]+$") + } + + private enum CodingKeys: String, CodingKey { + case inputReferenceArn = "inputReferenceArn" + case manageResourcePolicies = "manageResourcePolicies" + } + } + + public struct IdNamespaceAssociationInputReferenceProperties: AWSDecodableShape { + /// Defines how ID mapping workflows are supported for this ID namespace association. + public let idMappingWorkflowsSupported: [String] + /// The ID namespace type for this ID namespace association. + public let idNamespaceType: IdNamespaceType + + public init(idMappingWorkflowsSupported: [String], idNamespaceType: IdNamespaceType) { + self.idMappingWorkflowsSupported = idMappingWorkflowsSupported + self.idNamespaceType = idNamespaceType + } + + private enum CodingKeys: String, CodingKey { + case idMappingWorkflowsSupported = "idMappingWorkflowsSupported" + case idNamespaceType = "idNamespaceType" + } + } + + public struct IdNamespaceAssociationInputReferencePropertiesSummary: AWSDecodableShape { + /// The ID namespace type for this ID namespace association. 
+ public let idNamespaceType: IdNamespaceType + + public init(idNamespaceType: IdNamespaceType) { + self.idNamespaceType = idNamespaceType + } + + private enum CodingKeys: String, CodingKey { + case idNamespaceType = "idNamespaceType" + } + } + + public struct IdNamespaceAssociationSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of this ID namespace association. + public let arn: String + /// The Amazon Resource Name (ARN) of the collaboration that contains this ID namespace association. + public let collaborationArn: String + /// The unique identifier of the collaboration that contains this ID namespace association. + public let collaborationId: String + /// The time at which this ID namespace association was created. + public let createTime: Date + /// The description of the ID namespace association. + public let description: String? + /// The unique identifier of this ID namespace association. + public let id: String + /// The input reference configuration details for this ID namespace association. + public let inputReferenceConfig: IdNamespaceAssociationInputReferenceConfig + /// The input reference properties for this ID namespace association. + public let inputReferenceProperties: IdNamespaceAssociationInputReferencePropertiesSummary + /// The Amazon Resource Name (ARN) of the membership resource for this ID namespace association. + public let membershipArn: String + /// The unique identifier of the membership resource for this ID namespace association. + public let membershipId: String + /// The name of the ID namespace association. + public let name: String + /// The most recent time at which this ID namespace association has been updated. + public let updateTime: Date + + public init(arn: String, collaborationArn: String, collaborationId: String, createTime: Date, description: String? 
= nil, id: String, inputReferenceConfig: IdNamespaceAssociationInputReferenceConfig, inputReferenceProperties: IdNamespaceAssociationInputReferencePropertiesSummary, membershipArn: String, membershipId: String, name: String, updateTime: Date) { + self.arn = arn + self.collaborationArn = collaborationArn + self.collaborationId = collaborationId + self.createTime = createTime + self.description = description + self.id = id + self.inputReferenceConfig = inputReferenceConfig + self.inputReferenceProperties = inputReferenceProperties + self.membershipArn = membershipArn + self.membershipId = membershipId + self.name = name + self.updateTime = updateTime + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case collaborationArn = "collaborationArn" + case collaborationId = "collaborationId" + case createTime = "createTime" + case description = "description" + case id = "id" + case inputReferenceConfig = "inputReferenceConfig" + case inputReferenceProperties = "inputReferenceProperties" + case membershipArn = "membershipArn" + case membershipId = "membershipId" + case name = "name" + case updateTime = "updateTime" + } + } + public struct ListAnalysisTemplatesInput: AWSEncodableShape { /// The maximum size of the results that is returned per call. public let maxResults: Int? @@ -3636,6 +4932,57 @@ extension CleanRooms { } } + public struct ListCollaborationIdNamespaceAssociationsInput: AWSEncodableShape { + /// The unique identifier of the collaboration that contains the ID namespace associations that you want to retrieve. + public let collaborationIdentifier: String + /// The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.> + public let maxResults: Int? + /// The pagination token that's used to fetch the next set of results. + public let nextToken: String? 
+ + public init(collaborationIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.collaborationIdentifier = collaborationIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.collaborationIdentifier, key: "collaborationIdentifier") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.collaborationIdentifier, name: "collaborationIdentifier", parent: name, max: 36) + try self.validate(self.collaborationIdentifier, name: "collaborationIdentifier", parent: name, min: 36) + try self.validate(self.collaborationIdentifier, name: "collaborationIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 10240) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListCollaborationIdNamespaceAssociationsOutput: AWSDecodableShape { + /// The summary information of the collaboration ID namespace associations that you requested. + public let collaborationIdNamespaceAssociationSummaries: [CollaborationIdNamespaceAssociationSummary] + /// The token value provided to access the next page of results. + public let nextToken: String? + + public init(collaborationIdNamespaceAssociationSummaries: [CollaborationIdNamespaceAssociationSummary], nextToken: String? 
= nil) { + self.collaborationIdNamespaceAssociationSummaries = collaborationIdNamespaceAssociationSummaries + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case collaborationIdNamespaceAssociationSummaries = "collaborationIdNamespaceAssociationSummaries" + case nextToken = "nextToken" + } + } + public struct ListCollaborationPrivacyBudgetTemplatesInput: AWSEncodableShape { /// A unique identifier for one of your collaborations. public let collaborationIdentifier: String @@ -3824,29 +5171,124 @@ extension CleanRooms { private enum CodingKeys: CodingKey {} } - public struct ListConfiguredAudienceModelAssociationsOutput: AWSDecodableShape { - /// Summaries of the configured audience model associations that you requested. - public let configuredAudienceModelAssociationSummaries: [ConfiguredAudienceModelAssociationSummary] - /// The token value provided to access the next page of results. + public struct ListConfiguredAudienceModelAssociationsOutput: AWSDecodableShape { + /// Summaries of the configured audience model associations that you requested. + public let configuredAudienceModelAssociationSummaries: [ConfiguredAudienceModelAssociationSummary] + /// The token value provided to access the next page of results. + public let nextToken: String? + + public init(configuredAudienceModelAssociationSummaries: [ConfiguredAudienceModelAssociationSummary], nextToken: String? = nil) { + self.configuredAudienceModelAssociationSummaries = configuredAudienceModelAssociationSummaries + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case configuredAudienceModelAssociationSummaries = "configuredAudienceModelAssociationSummaries" + case nextToken = "nextToken" + } + } + + public struct ListConfiguredTableAssociationsInput: AWSEncodableShape { + /// The maximum size of the results that is returned per call. + public let maxResults: Int? 
+ /// A unique identifier for the membership to list configured table associations for. Currently accepts the membership ID. + public let membershipIdentifier: String + /// The token value retrieved from a previous call to access the next page of results. + public let nextToken: String? + + public init(maxResults: Int? = nil, membershipIdentifier: String, nextToken: String? = nil) { + self.maxResults = maxResults + self.membershipIdentifier = membershipIdentifier + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 10240) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListConfiguredTableAssociationsOutput: AWSDecodableShape { + /// The retrieved list of configured table associations. + public let configuredTableAssociationSummaries: [ConfiguredTableAssociationSummary] + /// The token value retrieved from a previous call to access the next page of results. + public let nextToken: String? 
+ + public init(configuredTableAssociationSummaries: [ConfiguredTableAssociationSummary], nextToken: String? = nil) { + self.configuredTableAssociationSummaries = configuredTableAssociationSummaries + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case configuredTableAssociationSummaries = "configuredTableAssociationSummaries" + case nextToken = "nextToken" + } + } + + public struct ListConfiguredTablesInput: AWSEncodableShape { + /// The maximum size of the results that is returned per call. + public let maxResults: Int? + /// The token value retrieved from a previous call to access the next page of results. + public let nextToken: String? + + public init(maxResults: Int? = nil, nextToken: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 10240) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListConfiguredTablesOutput: AWSDecodableShape { + /// The configured tables listed by the request. + public let configuredTableSummaries: [ConfiguredTableSummary] + /// The token value retrieved from a previous call to access the next page of results. public let nextToken: String? - public init(configuredAudienceModelAssociationSummaries: [ConfiguredAudienceModelAssociationSummary], nextToken: String? 
= nil) { - self.configuredAudienceModelAssociationSummaries = configuredAudienceModelAssociationSummaries + public init(configuredTableSummaries: [ConfiguredTableSummary], nextToken: String? = nil) { + self.configuredTableSummaries = configuredTableSummaries self.nextToken = nextToken } private enum CodingKeys: String, CodingKey { - case configuredAudienceModelAssociationSummaries = "configuredAudienceModelAssociationSummaries" + case configuredTableSummaries = "configuredTableSummaries" case nextToken = "nextToken" } } - public struct ListConfiguredTableAssociationsInput: AWSEncodableShape { - /// The maximum size of the results that is returned per call. + public struct ListIdMappingTablesInput: AWSEncodableShape { + /// The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met. public let maxResults: Int? - /// A unique identifier for the membership to list configured table associations for. Currently accepts the membership ID. + /// The unique identifier of the membership that contains the ID mapping tables that you want to view. public let membershipIdentifier: String - /// The token value retrieved from a previous call to access the next page of results. + /// The pagination token that's used to fetch the next set of results. public let nextToken: String? public init(maxResults: Int? = nil, membershipIdentifier: String, nextToken: String? = nil) { @@ -3875,31 +5317,34 @@ extension CleanRooms { private enum CodingKeys: CodingKey {} } - public struct ListConfiguredTableAssociationsOutput: AWSDecodableShape { - /// The retrieved list of configured table associations. - public let configuredTableAssociationSummaries: [ConfiguredTableAssociationSummary] - /// The token value retrieved from a previous call to access the next page of results. 
+ public struct ListIdMappingTablesOutput: AWSDecodableShape { + /// The summary information of the ID mapping tables that you requested. + public let idMappingTableSummaries: [IdMappingTableSummary] + /// The token value provided to access the next page of results. public let nextToken: String? - public init(configuredTableAssociationSummaries: [ConfiguredTableAssociationSummary], nextToken: String? = nil) { - self.configuredTableAssociationSummaries = configuredTableAssociationSummaries + public init(idMappingTableSummaries: [IdMappingTableSummary], nextToken: String? = nil) { + self.idMappingTableSummaries = idMappingTableSummaries self.nextToken = nextToken } private enum CodingKeys: String, CodingKey { - case configuredTableAssociationSummaries = "configuredTableAssociationSummaries" + case idMappingTableSummaries = "idMappingTableSummaries" case nextToken = "nextToken" } } - public struct ListConfiguredTablesInput: AWSEncodableShape { - /// The maximum size of the results that is returned per call. + public struct ListIdNamespaceAssociationsInput: AWSEncodableShape { + /// The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met. public let maxResults: Int? - /// The token value retrieved from a previous call to access the next page of results. + /// The unique identifier of the membership that contains the ID namespace association that you want to view. + public let membershipIdentifier: String + /// The pagination token that's used to fetch the next set of results. public let nextToken: String? - public init(maxResults: Int? = nil, nextToken: String? = nil) { + public init(maxResults: Int? = nil, membershipIdentifier: String, nextToken: String? 
= nil) { self.maxResults = maxResults + self.membershipIdentifier = membershipIdentifier self.nextToken = nextToken } @@ -3907,31 +5352,35 @@ extension CleanRooms { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) request.encodeQuery(self.maxResults, key: "maxResults") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") request.encodeQuery(self.nextToken, key: "nextToken") } public func validate(name: String) throws { try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") try self.validate(self.nextToken, name: "nextToken", parent: name, max: 10240) } private enum CodingKeys: CodingKey {} } - public struct ListConfiguredTablesOutput: AWSDecodableShape { - /// The configured tables listed by the request. - public let configuredTableSummaries: [ConfiguredTableSummary] - /// The token value retrieved from a previous call to access the next page of results. + public struct ListIdNamespaceAssociationsOutput: AWSDecodableShape { + /// The summary information of the ID namespace associations that you requested. + public let idNamespaceAssociationSummaries: [IdNamespaceAssociationSummary] + /// The token value provided to access the next page of results. public let nextToken: String? - public init(configuredTableSummaries: [ConfiguredTableSummary], nextToken: String? 
= nil) { - self.configuredTableSummaries = configuredTableSummaries + public init(idNamespaceAssociationSummaries: [IdNamespaceAssociationSummary], nextToken: String? = nil) { + self.idNamespaceAssociationSummaries = idNamespaceAssociationSummaries self.nextToken = nextToken } private enum CodingKeys: String, CodingKey { - case configuredTableSummaries = "configuredTableSummaries" + case idNamespaceAssociationSummaries = "idNamespaceAssociationSummaries" case nextToken = "nextToken" } } @@ -4550,6 +5999,49 @@ extension CleanRooms { } } + public struct PopulateIdMappingTableInput: AWSEncodableShape { + /// The unique identifier of the ID mapping table that you want to populate. + public let idMappingTableIdentifier: String + /// The unique identifier of the membership that contains the ID mapping table that you want to populate. + public let membershipIdentifier: String + + public init(idMappingTableIdentifier: String, membershipIdentifier: String) { + self.idMappingTableIdentifier = idMappingTableIdentifier + self.membershipIdentifier = membershipIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.idMappingTableIdentifier, key: "idMappingTableIdentifier") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, max: 36) + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, min: 36) + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct PopulateIdMappingTableOutput: AWSDecodableShape { + /// The unique identifier of the mapping job that will populate the ID mapping table. + public let idMappingJobId: String + + public init(idMappingJobId: String) { + self.idMappingJobId = idMappingJobId + } + + private enum CodingKeys: String, CodingKey { + case idMappingJobId = "idMappingJobId" + } + } + public struct PreviewPrivacyImpactInput: AWSEncodableShape { /// A unique identifier for one of your memberships for a collaboration. Accepts a membership ID. public let membershipIdentifier: String @@ -4663,7 +6155,7 @@ extension CleanRooms { public let membershipArn: String /// The identifier for a membership resource. public let membershipId: String - /// Specifies the epislon and noise parameters for the privacy budget template. + /// Specifies the epsilon and noise parameters for the privacy budget template. 
public let parameters: PrivacyBudgetTemplateParametersOutput /// Specifies the type of the privacy budget template. public let privacyBudgetType: PrivacyBudgetType @@ -4814,6 +6306,25 @@ extension CleanRooms { } } + public struct ProtectedQueryMemberOutputConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The unique identifier for the account. + public let accountId: String + + public init(accountId: String) { + self.accountId = accountId + } + + public func validate(name: String) throws { + try self.validate(self.accountId, name: "accountId", parent: name, max: 12) + try self.validate(self.accountId, name: "accountId", parent: name, min: 12) + try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d+$") + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + } + } + public struct ProtectedQueryResult: AWSDecodableShape { /// The output of the protected query. public let output: ProtectedQueryOutput @@ -4929,7 +6440,7 @@ extension CleanRooms { } public struct ProtectedQueryStatistics: AWSDecodableShape { - /// The duration of the Protected Query, from creation until query completion. + /// The duration of the protected query, from creation until query completion. public let totalDurationInMillis: Int64? public init(totalDurationInMillis: Int64? = nil) { @@ -4950,14 +6461,17 @@ extension CleanRooms { public let membershipArn: String /// The unique ID for the membership that initiated the protected query. public let membershipId: String + /// The receiver configuration. + public let receiverConfigurations: [ReceiverConfiguration] /// The status of the protected query. Value values are `SUBMITTED`, `STARTED`, `CANCELLED`, `CANCELLING`, `FAILED`, `SUCCESS`, `TIMED_OUT`. 
public let status: ProtectedQueryStatus - public init(createTime: Date, id: String, membershipArn: String, membershipId: String, status: ProtectedQueryStatus) { + public init(createTime: Date, id: String, membershipArn: String, membershipId: String, receiverConfigurations: [ReceiverConfiguration], status: ProtectedQueryStatus) { self.createTime = createTime self.id = id self.membershipArn = membershipArn self.membershipId = membershipId + self.receiverConfigurations = receiverConfigurations self.status = status } @@ -4966,6 +6480,7 @@ extension CleanRooms { case id = "id" case membershipArn = "membershipArn" case membershipId = "membershipId" + case receiverConfigurations = "receiverConfigurations" case status = "status" } } @@ -4983,6 +6498,36 @@ extension CleanRooms { } } + public struct QueryConstraintRequireOverlap: AWSDecodableShape { + /// The columns that are required to overlap. + public let columns: [String]? + + public init(columns: [String]? = nil) { + self.columns = columns + } + + private enum CodingKeys: String, CodingKey { + case columns = "columns" + } + } + + public struct ReceiverConfiguration: AWSDecodableShape { + /// The type of analysis for the protected query. The results of the query can be analyzed directly (DIRECT_ANALYSIS) or used as input into additional analyses (ADDITIONAL_ANALYSIS), such as a query that is a seed for a lookalike ML model. + public let analysisType: AnalysisType + /// The configuration details of the receiver configuration. + public let configurationDetails: ConfigurationDetails? + + public init(analysisType: AnalysisType, configurationDetails: ConfigurationDetails? = nil) { + self.analysisType = analysisType + self.configurationDetails = configurationDetails + } + + private enum CodingKeys: String, CodingKey { + case analysisType = "analysisType" + case configurationDetails = "configurationDetails" + } + } + public struct Schema: AWSDecodableShape { /// The analysis method for the schema. 
The only valid value is currently DIRECT_QUERY. public let analysisMethod: AnalysisMethod? @@ -5006,12 +6551,14 @@ extension CleanRooms { public let partitionKeys: [Column] /// Details about the status of the schema. Currently, only one entry is present. public let schemaStatusDetails: [SchemaStatusDetail] + /// The schema type properties. + public let schemaTypeProperties: SchemaTypeProperties? /// The type of schema. The only valid value is currently `TABLE`. public let type: SchemaType /// The time the schema was last updated. public let updateTime: Date - public init(analysisMethod: AnalysisMethod? = nil, analysisRuleTypes: [AnalysisRuleType], collaborationArn: String, collaborationId: String, columns: [Column], createTime: Date, creatorAccountId: String, description: String, name: String, partitionKeys: [Column], schemaStatusDetails: [SchemaStatusDetail], type: SchemaType, updateTime: Date) { + public init(analysisMethod: AnalysisMethod? = nil, analysisRuleTypes: [AnalysisRuleType], collaborationArn: String, collaborationId: String, columns: [Column], createTime: Date, creatorAccountId: String, description: String, name: String, partitionKeys: [Column], schemaStatusDetails: [SchemaStatusDetail], schemaTypeProperties: SchemaTypeProperties? 
= nil, type: SchemaType, updateTime: Date) { self.analysisMethod = analysisMethod self.analysisRuleTypes = analysisRuleTypes self.collaborationArn = collaborationArn @@ -5023,6 +6570,7 @@ extension CleanRooms { self.name = name self.partitionKeys = partitionKeys self.schemaStatusDetails = schemaStatusDetails + self.schemaTypeProperties = schemaTypeProperties self.type = type self.updateTime = updateTime } @@ -5039,6 +6587,7 @@ extension CleanRooms { case name = "name" case partitionKeys = "partitionKeys" case schemaStatusDetails = "schemaStatusDetails" + case schemaTypeProperties = "schemaTypeProperties" case type = "type" case updateTime = "updateTime" } @@ -5069,15 +6618,18 @@ extension CleanRooms { public struct SchemaStatusDetail: AWSDecodableShape { /// The analysis rule type for which the schema status has been evaluated. public let analysisRuleType: AnalysisRuleType? + /// The type of analysis that can be performed on the schema. A schema can have an analysisType of DIRECT_ANALYSIS, ADDITIONAL_ANALYSIS_FOR_AUDIENCE_GENERATION, or both. + public let analysisType: AnalysisType /// The configuration details of the schema analysis rule for the given type. public let configurations: [SchemaConfiguration]? /// The reasons why the schema status is set to its current state. public let reasons: [SchemaStatusReason]? - /// The status of the schema. + /// The status of the schema, indicating if it is ready to query. public let status: SchemaStatus - public init(analysisRuleType: AnalysisRuleType? = nil, configurations: [SchemaConfiguration]? = nil, reasons: [SchemaStatusReason]? = nil, status: SchemaStatus) { + public init(analysisRuleType: AnalysisRuleType? = nil, analysisType: AnalysisType, configurations: [SchemaConfiguration]? = nil, reasons: [SchemaStatusReason]? 
= nil, status: SchemaStatus) { self.analysisRuleType = analysisRuleType + self.analysisType = analysisType self.configurations = configurations self.reasons = reasons self.status = status @@ -5085,6 +6637,7 @@ extension CleanRooms { private enum CodingKeys: String, CodingKey { case analysisRuleType = "analysisRuleType" + case analysisType = "analysisType" case configurations = "configurations" case reasons = "reasons" case status = "status" @@ -5489,6 +7042,60 @@ extension CleanRooms { } } + public struct UpdateConfiguredTableAssociationAnalysisRuleInput: AWSEncodableShape { + /// The updated analysis rule policy for the configured table association. + public let analysisRulePolicy: ConfiguredTableAssociationAnalysisRulePolicy + /// The analysis rule type that you want to update. + public let analysisRuleType: ConfiguredTableAssociationAnalysisRuleType + /// The identifier for the configured table association to update. + public let configuredTableAssociationIdentifier: String + /// A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID. + public let membershipIdentifier: String + + public init(analysisRulePolicy: ConfiguredTableAssociationAnalysisRulePolicy, analysisRuleType: ConfiguredTableAssociationAnalysisRuleType, configuredTableAssociationIdentifier: String, membershipIdentifier: String) { + self.analysisRulePolicy = analysisRulePolicy + self.analysisRuleType = analysisRuleType + self.configuredTableAssociationIdentifier = configuredTableAssociationIdentifier + self.membershipIdentifier = membershipIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.analysisRulePolicy, forKey: .analysisRulePolicy) + request.encodePath(self.analysisRuleType, key: "analysisRuleType") + request.encodePath(self.configuredTableAssociationIdentifier, key: "configuredTableAssociationIdentifier") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + } + + public func validate(name: String) throws { + try self.analysisRulePolicy.validate(name: "\(name).analysisRulePolicy") + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, max: 36) + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, min: 36) + try self.validate(self.configuredTableAssociationIdentifier, name: "configuredTableAssociationIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case analysisRulePolicy = "analysisRulePolicy" + } + } + + public struct UpdateConfiguredTableAssociationAnalysisRuleOutput: AWSDecodableShape { + /// The updated analysis rule for the configured table association. In the console, the ConfiguredTableAssociationAnalysisRule is referred to as the collaboration analysis rule. 
+ public let analysisRule: ConfiguredTableAssociationAnalysisRule + + public init(analysisRule: ConfiguredTableAssociationAnalysisRule) { + self.analysisRule = analysisRule + } + + private enum CodingKeys: String, CodingKey { + case analysisRule = "analysisRule" + } + } + public struct UpdateConfiguredTableAssociationInput: AWSEncodableShape { /// The unique identifier for the configured table association to update. Currently accepts the configured table association ID. public let configuredTableAssociationIdentifier: String @@ -5600,6 +7207,129 @@ extension CleanRooms { } } + public struct UpdateIdMappingTableInput: AWSEncodableShape { + /// A new description for the ID mapping table. + public let description: String? + /// The unique identifier of the ID mapping table that you want to update. + public let idMappingTableIdentifier: String + /// The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. + public let kmsKeyArn: String? + /// The unique identifier of the membership that contains the ID mapping table that you want to update. + public let membershipIdentifier: String + + public init(description: String? = nil, idMappingTableIdentifier: String, kmsKeyArn: String? = nil, membershipIdentifier: String) { + self.description = description + self.idMappingTableIdentifier = idMappingTableIdentifier + self.kmsKeyArn = kmsKeyArn + self.membershipIdentifier = membershipIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.idMappingTableIdentifier, key: "idMappingTableIdentifier") + try container.encodeIfPresent(self.kmsKeyArn, forKey: .kmsKeyArn) + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 255) + try self.validate(self.description, name: "description", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t\\r\\n]*$") + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, max: 36) + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, min: 36) + try self.validate(self.idMappingTableIdentifier, name: "idMappingTableIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, max: 2048) + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, min: 20) + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, pattern: "^arn:aws:kms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:key/[a-zA-Z0-9-]+$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case kmsKeyArn = "kmsKeyArn" + } + } + + public struct UpdateIdMappingTableOutput: AWSDecodableShape { + /// The updated ID mapping table. 
+ public let idMappingTable: IdMappingTable + + public init(idMappingTable: IdMappingTable) { + self.idMappingTable = idMappingTable + } + + private enum CodingKeys: String, CodingKey { + case idMappingTable = "idMappingTable" + } + } + + public struct UpdateIdNamespaceAssociationInput: AWSEncodableShape { + /// A new description for the ID namespace association. + public let description: String? + /// The configuration settings for the ID mapping table. + public let idMappingConfig: IdMappingConfig? + /// The unique identifier of the ID namespace association that you want to update. + public let idNamespaceAssociationIdentifier: String + /// The unique identifier of the membership that contains the ID namespace association that you want to update. + public let membershipIdentifier: String + /// A new name for the ID namespace association. + public let name: String? + + public init(description: String? = nil, idMappingConfig: IdMappingConfig? = nil, idNamespaceAssociationIdentifier: String, membershipIdentifier: String, name: String? = nil) { + self.description = description + self.idMappingConfig = idMappingConfig + self.idNamespaceAssociationIdentifier = idNamespaceAssociationIdentifier + self.membershipIdentifier = membershipIdentifier + self.name = name + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encodeIfPresent(self.idMappingConfig, forKey: .idMappingConfig) + request.encodePath(self.idNamespaceAssociationIdentifier, key: "idNamespaceAssociationIdentifier") + request.encodePath(self.membershipIdentifier, key: "membershipIdentifier") + try container.encodeIfPresent(self.name, forKey: .name) + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 255) + try self.validate(self.description, name: "description", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t\\r\\n]*$") + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, max: 36) + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, min: 36) + try self.validate(self.idNamespaceAssociationIdentifier, name: "idNamespaceAssociationIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, max: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, min: 36) + try self.validate(self.membershipIdentifier, name: "membershipIdentifier", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.name, name: "name", parent: name, max: 100) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^(?!\\s*$)[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t]*$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case idMappingConfig = "idMappingConfig" + case name 
= "name" + } + } + + public struct UpdateIdNamespaceAssociationOutput: AWSDecodableShape { + /// The updated ID namespace association. + public let idNamespaceAssociation: IdNamespaceAssociation + + public init(idNamespaceAssociation: IdNamespaceAssociation) { + self.idNamespaceAssociation = idNamespaceAssociation + } + + private enum CodingKeys: String, CodingKey { + case idNamespaceAssociation = "idNamespaceAssociation" + } + } + public struct UpdateMembershipInput: AWSEncodableShape { /// The default protected query result configuration as specified by the member who can receive results. public let defaultResultConfiguration: MembershipProtectedQueryResultConfiguration? @@ -5781,6 +7511,19 @@ extension CleanRooms { } } + public struct ConfigurationDetails: AWSDecodableShape { + /// The direct analysis configuration details. + public let directAnalysisConfigurationDetails: DirectAnalysisConfigurationDetails? + + public init(directAnalysisConfigurationDetails: DirectAnalysisConfigurationDetails? = nil) { + self.directAnalysisConfigurationDetails = directAnalysisConfigurationDetails + } + + private enum CodingKeys: String, CodingKey { + case directAnalysisConfigurationDetails = "directAnalysisConfigurationDetails" + } + } + public struct ConfiguredTableAnalysisRulePolicy: AWSEncodableShape & AWSDecodableShape { /// Controls on the query specifications that can be run on a configured table. public let v1: ConfiguredTableAnalysisRulePolicyV1? @@ -5798,6 +7541,23 @@ extension CleanRooms { } } + public struct ConfiguredTableAssociationAnalysisRulePolicy: AWSEncodableShape & AWSDecodableShape { + /// The policy for the configured table association analysis rule. + public let v1: ConfiguredTableAssociationAnalysisRulePolicyV1? + + public init(v1: ConfiguredTableAssociationAnalysisRulePolicyV1? 
= nil) { + self.v1 = v1 + } + + public func validate(name: String) throws { + try self.v1?.validate(name: "\(name).v1") + } + + private enum CodingKeys: String, CodingKey { + case v1 = "v1" + } + } + public struct MembershipProtectedQueryOutputConfiguration: AWSEncodableShape & AWSDecodableShape { public let s3: ProtectedQueryS3OutputConfiguration? @@ -5904,20 +7664,29 @@ extension CleanRooms { } } - public struct ProtectedQueryOutputConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Required configuration for a protected query with an `S3` output type. - public let s3: ProtectedQueryS3OutputConfiguration? + public struct QueryConstraint: AWSDecodableShape { + /// An array of column names that specifies which columns are required in the JOIN statement. + public let requireOverlap: QueryConstraintRequireOverlap? - public init(s3: ProtectedQueryS3OutputConfiguration? = nil) { - self.s3 = s3 + public init(requireOverlap: QueryConstraintRequireOverlap? = nil) { + self.requireOverlap = requireOverlap } - public func validate(name: String) throws { - try self.s3?.validate(name: "\(name).s3") + private enum CodingKeys: String, CodingKey { + case requireOverlap = "requireOverlap" + } + } + + public struct SchemaTypeProperties: AWSDecodableShape { + /// The ID mapping table for the schema type properties. + public let idMappingTable: IdMappingTableSchemaTypeProperties? + + public init(idMappingTable: IdMappingTableSchemaTypeProperties? 
= nil) { + self.idMappingTable = idMappingTable } private enum CodingKeys: String, CodingKey { - case s3 = "s3" + case idMappingTable = "idMappingTable" } } diff --git a/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_shapes.swift b/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_shapes.swift index 65945f2883..b8ad81835f 100644 --- a/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_shapes.swift +++ b/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_shapes.swift @@ -168,25 +168,30 @@ extension CleanRoomsML { public struct AudienceGenerationJobDataSource: AWSEncodableShape & AWSDecodableShape { /// Defines the Amazon S3 bucket where the seed audience for the generating audience is stored. A valid data source is a JSON line file in the following format: {"user_id": "111111"} {"user_id": "222222"} ... - public let dataSource: S3ConfigMap - /// The ARN of the IAM role that can read the Amazon S3 bucket where the training data is stored. + public let dataSource: S3ConfigMap? + /// The ARN of the IAM role that can read the Amazon S3 bucket where the seed audience is stored. public let roleArn: String + /// The protected SQL query parameters. + public let sqlParameters: ProtectedQuerySQLParameters? - public init(dataSource: S3ConfigMap, roleArn: String) { + public init(dataSource: S3ConfigMap? = nil, roleArn: String, sqlParameters: ProtectedQuerySQLParameters? 
= nil) { self.dataSource = dataSource self.roleArn = roleArn + self.sqlParameters = sqlParameters } public func validate(name: String) throws { - try self.dataSource.validate(name: "\(name).dataSource") + try self.dataSource?.validate(name: "\(name).dataSource") try self.validate(self.roleArn, name: "roleArn", parent: name, max: 2048) try self.validate(self.roleArn, name: "roleArn", parent: name, min: 20) try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:aws[-a-z]*:iam::[0-9]{12}:role/.+$") + try self.sqlParameters?.validate(name: "\(name).sqlParameters") } private enum CodingKeys: String, CodingKey { case dataSource = "dataSource" case roleArn = "roleArn" + case sqlParameters = "sqlParameters" } } @@ -856,6 +861,8 @@ extension CleanRoomsML { public let metrics: AudienceQualityMetrics? /// The name of the audience generation job. public let name: String + /// The unique identifier of the protected query for this audience generation job. + public let protectedQueryIdentifier: String? /// The seed audience that was used for this audience generation job. This field will be null if the account calling the API is the account that started this audience generation job. public let seedAudience: AudienceGenerationJobDataSource? /// The AWS account that started this audience generation job. @@ -869,7 +876,7 @@ extension CleanRoomsML { /// The most recent time at which the audience generation job was updated. public let updateTime: Date - public init(audienceGenerationJobArn: String, collaborationId: String? = nil, configuredAudienceModelArn: String, createTime: Date, description: String? = nil, includeSeedInOutput: Bool? = nil, metrics: AudienceQualityMetrics? = nil, name: String, seedAudience: AudienceGenerationJobDataSource? = nil, startedBy: String? = nil, status: AudienceGenerationJobStatus, statusDetails: StatusDetails? = nil, tags: [String: String]? 
= nil, updateTime: Date) { + public init(audienceGenerationJobArn: String, collaborationId: String? = nil, configuredAudienceModelArn: String, createTime: Date, description: String? = nil, includeSeedInOutput: Bool? = nil, metrics: AudienceQualityMetrics? = nil, name: String, protectedQueryIdentifier: String? = nil, seedAudience: AudienceGenerationJobDataSource? = nil, startedBy: String? = nil, status: AudienceGenerationJobStatus, statusDetails: StatusDetails? = nil, tags: [String: String]? = nil, updateTime: Date) { self.audienceGenerationJobArn = audienceGenerationJobArn self.collaborationId = collaborationId self.configuredAudienceModelArn = configuredAudienceModelArn @@ -878,6 +885,7 @@ extension CleanRoomsML { self.includeSeedInOutput = includeSeedInOutput self.metrics = metrics self.name = name + self.protectedQueryIdentifier = protectedQueryIdentifier self.seedAudience = seedAudience self.startedBy = startedBy self.status = status @@ -895,6 +903,7 @@ extension CleanRoomsML { case includeSeedInOutput = "includeSeedInOutput" case metrics = "metrics" case name = "name" + case protectedQueryIdentifier = "protectedQueryIdentifier" case seedAudience = "seedAudience" case startedBy = "startedBy" case status = "status" @@ -1495,6 +1504,38 @@ extension CleanRoomsML { } } + public struct ProtectedQuerySQLParameters: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) associated with the analysis template within a collaboration. + public let analysisTemplateArn: String? + /// The protected query SQL parameters. + public let parameters: [String: String]? + /// The query string to be submitted. + public let queryString: String? + + public init(analysisTemplateArn: String? = nil, parameters: [String: String]? = nil, queryString: String? 
= nil) { + self.analysisTemplateArn = analysisTemplateArn + self.parameters = parameters + self.queryString = queryString + } + + public func validate(name: String) throws { + try self.validate(self.analysisTemplateArn, name: "analysisTemplateArn", parent: name, max: 200) + try self.validate(self.analysisTemplateArn, name: "analysisTemplateArn", parent: name, pattern: "^arn:aws[-a-z]*:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/analysistemplate/[\\d\\w-]+$") + try self.parameters?.forEach { + try validate($0.key, name: "parameters.key", parent: name, max: 100) + try validate($0.key, name: "parameters.key", parent: name, min: 1) + try validate($0.key, name: "parameters.key", parent: name, pattern: "^[0-9a-zA-Z_]+$") + try validate($0.value, name: "parameters[\"\($0.key)\"]", parent: name, max: 250) + } + } + + private enum CodingKeys: String, CodingKey { + case analysisTemplateArn = "analysisTemplateArn" + case parameters = "parameters" + case queryString = "queryString" + } + } + public struct PutConfiguredAudienceModelPolicyRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the configured audience model that the resource policy will govern. public let configuredAudienceModelArn: String diff --git a/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift b/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift index 14d146ae71..ba6419e74d 100644 --- a/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift +++ b/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift @@ -385,7 +385,7 @@ public struct CodeBuild: AWSService { ) } - /// Imports the source repository credentials for an CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository. + /// Imports the source repository credentials for an CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket repository. 
@Sendable public func importSourceCredentials(_ input: ImportSourceCredentialsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ImportSourceCredentialsOutput { return try await self.client.execute( diff --git a/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift b/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift index 9b0600b332..66164448a1 100644 --- a/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift +++ b/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift @@ -50,6 +50,7 @@ extension CodeBuild { case codeconnections = "CODECONNECTIONS" case oauth = "OAUTH" case personalAccessToken = "PERSONAL_ACCESS_TOKEN" + case secretsManager = "SECRETS_MANAGER" public var description: String { return self.rawValue } } @@ -326,6 +327,7 @@ extension CodeBuild { public enum SourceAuthType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case codeconnections = "CODECONNECTIONS" case oauth = "OAUTH" + case secretsManager = "SECRETS_MANAGER" public var description: String { return self.rawValue } } @@ -2139,13 +2141,13 @@ extension CodeBuild { } public struct ImportSourceCredentialsInput: AWSEncodableShape { - /// The type of authentication used to connect to a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the CodeBuild console. Note that CODECONNECTIONS is only valid for GitLab and GitLab Self Managed. + /// The type of authentication used to connect to a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the CodeBuild console. public let authType: AuthType /// The source provider used for this project. public let serverType: ServerType /// Set to false to prevent overwriting the repository source credentials. Set to true to overwrite the repository source credentials. The default value is true. 
public let shouldOverwrite: Bool? - /// For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the authType CODECONNECTIONS, this is the connectionArn. + /// For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the authType CODECONNECTIONS, this is the connectionArn. For the authType SECRETS_MANAGER, this is the secretArn. public let token: String /// The Bitbucket username when the authType is BASIC_AUTH. This parameter is not valid for other types of source providers or connections. public let username: String? @@ -3172,7 +3174,7 @@ extension CodeBuild { } public struct ProjectSource: AWSEncodableShape & AWSDecodableShape { - /// Information about the authorization settings for CodeBuild to access the source code to be built. This information is for the CodeBuild console's use only. Your code should not get or set this information directly. + /// Information about the authorization settings for CodeBuild to access the source code to be built. public let auth: SourceAuth? /// The buildspec file declaration to use for the builds in this build project. If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same Amazon Web Services Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location. public let buildspec: String? 
@@ -3695,7 +3697,7 @@ extension CodeBuild { public struct SourceAuth: AWSEncodableShape & AWSDecodableShape { /// The resource value that applies to the specified authorization type. public let resource: String? - /// The authorization type to use. Valid options are OAUTH or CODECONNECTIONS. + /// The authorization type to use. Valid options are OAUTH, CODECONNECTIONS, or SECRETS_MANAGER. public let type: SourceAuthType public init(resource: String? = nil, type: SourceAuthType) { @@ -3712,9 +3714,9 @@ extension CodeBuild { public struct SourceCredentialsInfo: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the token. public let arn: String? - /// The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS. + /// The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, CODECONNECTIONS, or SECRETS_MANAGER. public let authType: AuthType? - /// The connection ARN if your serverType type is GITLAB or GITLAB_SELF_MANAGED and your authType is CODECONNECTIONS. + /// The connection ARN if your authType is CODECONNECTIONS or SECRETS_MANAGER. public let resource: String? /// The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, GITLAB, GITLAB_SELF_MANAGED, or BITBUCKET. public let serverType: ServerType? 
diff --git a/Sources/Soto/Services/CodeCommit/CodeCommit_shapes.swift b/Sources/Soto/Services/CodeCommit/CodeCommit_shapes.swift index 6274ee798c..f339b36f26 100644 --- a/Sources/Soto/Services/CodeCommit/CodeCommit_shapes.swift +++ b/Sources/Soto/Services/CodeCommit/CodeCommit_shapes.swift @@ -5311,6 +5311,7 @@ public struct CodeCommitErrorType: AWSErrorType { case noChangeException = "NoChangeException" case numberOfRuleTemplatesExceededException = "NumberOfRuleTemplatesExceededException" case numberOfRulesExceededException = "NumberOfRulesExceededException" + case operationNotAllowedException = "OperationNotAllowedException" case overrideAlreadySetException = "OverrideAlreadySetException" case overrideStatusRequiredException = "OverrideStatusRequiredException" case parentCommitDoesNotExistException = "ParentCommitDoesNotExistException" @@ -5659,6 +5660,8 @@ public struct CodeCommitErrorType: AWSErrorType { public static var numberOfRuleTemplatesExceededException: Self { .init(.numberOfRuleTemplatesExceededException) } /// The approval rule cannot be added. The pull request has the maximum number of approval rules associated with it. public static var numberOfRulesExceededException: Self { .init(.numberOfRulesExceededException) } + /// The requested action is not allowed. + public static var operationNotAllowedException: Self { .init(.operationNotAllowedException) } /// The pull request has already had its approval rules set to override. public static var overrideAlreadySetException: Self { .init(.overrideAlreadySetException) } /// An override status is required, but no value was provided. Valid values include OVERRIDE and REVOKE. 
diff --git a/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift b/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift index 51b0acfa4c..b444535580 100644 --- a/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift +++ b/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift @@ -348,6 +348,32 @@ public struct CodePipeline: AWSService { ) } + /// Lists the rule executions that have occurred in a pipeline configured for conditions with rules. + @Sendable + public func listRuleExecutions(_ input: ListRuleExecutionsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListRuleExecutionsOutput { + return try await self.client.execute( + operation: "ListRuleExecutions", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists the rules for the condition. + @Sendable + public func listRuleTypes(_ input: ListRuleTypesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListRuleTypesOutput { + return try await self.client.execute( + operation: "ListRuleTypes", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets the set of key-value pairs (metadata) that are used to manage the resource. @Sendable public func listTagsForResource(_ input: ListTagsForResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceOutput { @@ -374,6 +400,19 @@ public struct CodePipeline: AWSService { ) } + /// Used to override a stage condition. + @Sendable + public func overrideStageCondition(_ input: OverrideStageConditionInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "OverrideStageCondition", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns information about any jobs for CodePipeline to act on. 
PollForJobs is valid only for action types with "Custom" in the owner field. If the action type contains AWS or ThirdParty in the owner field, the PollForJobs action returns an error. When this API is called, CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts. This API also returns any secret values defined for the action. @Sendable public func pollForJobs(_ input: PollForJobsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> PollForJobsOutput { @@ -698,6 +737,25 @@ extension CodePipeline { ) } + /// Lists the rule executions that have occurred in a pipeline configured for conditions with rules. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listRuleExecutionsPaginator( + _ input: ListRuleExecutionsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listRuleExecutions, + inputKey: \ListRuleExecutionsInput.nextToken, + outputKey: \ListRuleExecutionsOutput.nextToken, + logger: logger + ) + } + /// Gets the set of key-value pairs (metadata) that are used to manage the resource. /// Return PaginatorSequence for operation. 
/// @@ -778,6 +836,17 @@ extension CodePipeline.ListPipelinesInput: AWSPaginateToken { } } +extension CodePipeline.ListRuleExecutionsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> CodePipeline.ListRuleExecutionsInput { + return .init( + filter: self.filter, + maxResults: self.maxResults, + nextToken: token, + pipelineName: self.pipelineName + ) + } +} + extension CodePipeline.ListTagsForResourceInput: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> CodePipeline.ListTagsForResourceInput { return .init( diff --git a/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift b/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift index f084fc42b6..d9d9e92918 100644 --- a/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift +++ b/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift @@ -79,6 +79,23 @@ extension CodePipeline { public var description: String { return self.rawValue } } + public enum ConditionExecutionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case abandoned = "Abandoned" + case cancelled = "Cancelled" + case errored = "Errored" + case failed = "Failed" + case inProgress = "InProgress" + case overridden = "Overridden" + case succeeded = "Succeeded" + public var description: String { return self.rawValue } + } + + public enum ConditionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case beforeEntry = "BEFORE_ENTRY" + case onSuccess = "ON_SUCCESS" + public var description: String { return self.rawValue } + } + public enum EncryptionKeyType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case kms = "KMS" public var description: String { return self.rawValue } @@ -154,10 +171,36 @@ extension CodePipeline { } public enum Result: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case fail = "FAIL" case rollback = "ROLLBACK" public var 
description: String { return self.rawValue } } + public enum RuleCategory: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case rule = "Rule" + public var description: String { return self.rawValue } + } + + public enum RuleConfigurationPropertyType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case boolean = "Boolean" + case number = "Number" + case string = "String" + public var description: String { return self.rawValue } + } + + public enum RuleExecutionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case abandoned = "Abandoned" + case failed = "Failed" + case inProgress = "InProgress" + case succeeded = "Succeeded" + public var description: String { return self.rawValue } + } + + public enum RuleOwner: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case aws = "AWS" + public var description: String { return self.rawValue } + } + public enum SourceRevisionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case commitId = "COMMIT_ID" case imageDigest = "IMAGE_DIGEST" @@ -1236,6 +1279,27 @@ extension CodePipeline { } } + public struct BeforeEntryConditions: AWSEncodableShape & AWSDecodableShape { + /// The conditions that are configured as entry conditions. + public let conditions: [Condition] + + public init(conditions: [Condition]) { + self.conditions = conditions + } + + public func validate(name: String) throws { + try self.conditions.forEach { + try $0.validate(name: "\(name).conditions[]") + } + try self.validate(self.conditions, name: "conditions", parent: name, max: 1) + try self.validate(self.conditions, name: "conditions", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case conditions = "conditions" + } + } + public struct BlockerDeclaration: AWSEncodableShape & AWSDecodableShape { /// Reserved for future use. 
public let name: String @@ -1258,6 +1322,69 @@ extension CodePipeline { } } + public struct Condition: AWSEncodableShape & AWSDecodableShape { + /// The action to be done when the condition is met. For example, rolling back an execution for a failure condition. + public let result: Result? + /// The rules that make up the condition. + public let rules: [RuleDeclaration]? + + public init(result: Result? = nil, rules: [RuleDeclaration]? = nil) { + self.result = result + self.rules = rules + } + + public func validate(name: String) throws { + try self.rules?.forEach { + try $0.validate(name: "\(name).rules[]") + } + try self.validate(self.rules, name: "rules", parent: name, max: 5) + try self.validate(self.rules, name: "rules", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case result = "result" + case rules = "rules" + } + } + + public struct ConditionExecution: AWSDecodableShape { + /// The last status change of the condition. + public let lastStatusChange: Date? + /// The status of the run for a condition. + public let status: ConditionExecutionStatus? + /// The summary of information about a run for a condition. + public let summary: String? + + public init(lastStatusChange: Date? = nil, status: ConditionExecutionStatus? = nil, summary: String? = nil) { + self.lastStatusChange = lastStatusChange + self.status = status + self.summary = summary + } + + private enum CodingKeys: String, CodingKey { + case lastStatusChange = "lastStatusChange" + case status = "status" + case summary = "summary" + } + } + + public struct ConditionState: AWSDecodableShape { + /// The state of the latest run of the rule. + public let latestExecution: ConditionExecution? + /// The state of the rules for the condition. + public let ruleStates: [RuleState]? + + public init(latestExecution: ConditionExecution? = nil, ruleStates: [RuleState]? 
= nil) { + self.latestExecution = latestExecution + self.ruleStates = ruleStates + } + + private enum CodingKeys: String, CodingKey { + case latestExecution = "latestExecution" + case ruleStates = "ruleStates" + } + } + public struct CreateCustomActionTypeInput: AWSEncodableShape { /// The category of the custom action, such as a build action or a test action. public let category: ActionCategory @@ -1681,14 +1808,26 @@ extension CodePipeline { } public struct FailureConditions: AWSEncodableShape & AWSDecodableShape { + /// The conditions that are configured as failure conditions. + public let conditions: [Condition]? /// The specified result for when the failure conditions are met, such as rolling back the stage. public let result: Result? - public init(result: Result? = nil) { + public init(conditions: [Condition]? = nil, result: Result? = nil) { + self.conditions = conditions self.result = result } + public func validate(name: String) throws { + try self.conditions?.forEach { + try $0.validate(name: "\(name).conditions[]") + } + try self.validate(self.conditions, name: "conditions", parent: name, max: 1) + try self.validate(self.conditions, name: "conditions", parent: name, min: 1) + } + private enum CodingKeys: String, CodingKey { + case conditions = "conditions" case result = "result" } } @@ -2523,6 +2662,94 @@ extension CodePipeline { } } + public struct ListRuleExecutionsInput: AWSEncodableShape { + /// Input information used to filter rule execution history. + public let filter: RuleExecutionFilter? + /// The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value. Pipeline history is limited to the most recent 12 months, based on pipeline execution start times. Default value is 100. + public let maxResults: Int? + /// The token that was returned from the previous ListRuleExecutions call, which can be used to return the next set of rule executions in the list. 
+ public let nextToken: String? + /// The name of the pipeline for which you want to get execution summary information. + public let pipelineName: String + + public init(filter: RuleExecutionFilter? = nil, maxResults: Int? = nil, nextToken: String? = nil, pipelineName: String) { + self.filter = filter + self.maxResults = maxResults + self.nextToken = nextToken + self.pipelineName = pipelineName + } + + public func validate(name: String) throws { + try self.filter?.validate(name: "\(name).filter") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.pipelineName, name: "pipelineName", parent: name, max: 100) + try self.validate(self.pipelineName, name: "pipelineName", parent: name, min: 1) + try self.validate(self.pipelineName, name: "pipelineName", parent: name, pattern: "^[A-Za-z0-9.@\\-_]+$") + } + + private enum CodingKeys: String, CodingKey { + case filter = "filter" + case maxResults = "maxResults" + case nextToken = "nextToken" + case pipelineName = "pipelineName" + } + } + + public struct ListRuleExecutionsOutput: AWSDecodableShape { + /// A token that can be used in the next ListRuleExecutions call. To view all items in the list, continue to call this operation with each subsequent token until no more nextToken values are returned. + public let nextToken: String? + /// Details about the output for listing rule executions. + public let ruleExecutionDetails: [RuleExecutionDetail]? + + public init(nextToken: String? = nil, ruleExecutionDetails: [RuleExecutionDetail]? 
= nil) { + self.nextToken = nextToken + self.ruleExecutionDetails = ruleExecutionDetails + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case ruleExecutionDetails = "ruleExecutionDetails" + } + } + + public struct ListRuleTypesInput: AWSEncodableShape { + /// The rule Region to filter on. + public let regionFilter: String? + /// The rule owner to filter on. + public let ruleOwnerFilter: RuleOwner? + + public init(regionFilter: String? = nil, ruleOwnerFilter: RuleOwner? = nil) { + self.regionFilter = regionFilter + self.ruleOwnerFilter = ruleOwnerFilter + } + + public func validate(name: String) throws { + try self.validate(self.regionFilter, name: "regionFilter", parent: name, max: 30) + try self.validate(self.regionFilter, name: "regionFilter", parent: name, min: 4) + } + + private enum CodingKeys: String, CodingKey { + case regionFilter = "regionFilter" + case ruleOwnerFilter = "ruleOwnerFilter" + } + } + + public struct ListRuleTypesOutput: AWSDecodableShape { + /// Lists the rules that are configured for the condition. + public let ruleTypes: [RuleType] + + public init(ruleTypes: [RuleType]) { + self.ruleTypes = ruleTypes + } + + private enum CodingKeys: String, CodingKey { + case ruleTypes = "ruleTypes" + } + } + public struct ListTagsForResourceInput: AWSEncodableShape { /// The maximum number of results to return in a single call. public let maxResults: Int? @@ -2666,6 +2893,41 @@ extension CodePipeline { } } + public struct OverrideStageConditionInput: AWSEncodableShape { + /// The type of condition to override for the stage, such as entry conditions, failure conditions, or success conditions. + public let conditionType: ConditionType + /// The ID of the pipeline execution for the override. + public let pipelineExecutionId: String + /// The name of the pipeline with the stage that will override the condition. + public let pipelineName: String + /// The name of the stage for the override. 
+ public let stageName: String + + public init(conditionType: ConditionType, pipelineExecutionId: String, pipelineName: String, stageName: String) { + self.conditionType = conditionType + self.pipelineExecutionId = pipelineExecutionId + self.pipelineName = pipelineName + self.stageName = stageName + } + + public func validate(name: String) throws { + try self.validate(self.pipelineExecutionId, name: "pipelineExecutionId", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.pipelineName, name: "pipelineName", parent: name, max: 100) + try self.validate(self.pipelineName, name: "pipelineName", parent: name, min: 1) + try self.validate(self.pipelineName, name: "pipelineName", parent: name, pattern: "^[A-Za-z0-9.@\\-_]+$") + try self.validate(self.stageName, name: "stageName", parent: name, max: 100) + try self.validate(self.stageName, name: "stageName", parent: name, min: 1) + try self.validate(self.stageName, name: "stageName", parent: name, pattern: "^[A-Za-z0-9.@\\-_]+$") + } + + private enum CodingKeys: String, CodingKey { + case conditionType = "conditionType" + case pipelineExecutionId = "pipelineExecutionId" + case pipelineName = "pipelineName" + case stageName = "stageName" + } + } + public struct PipelineContext: AWSDecodableShape { /// The context of an action to a job worker in the stage of a pipeline. public let action: ActionContext? @@ -3535,6 +3797,424 @@ extension CodePipeline { } } + public struct RuleConfigurationProperty: AWSDecodableShape { + /// The description of the action configuration property that is displayed to users. + public let description: String? + /// Whether the configuration property is a key. + public let key: Bool + /// The name of the rule configuration property. + public let name: String + /// Indicates whether the property can be queried. 
If you create a pipeline with a condition and rule, and that rule contains a queryable property, the value for that configuration property is subject to other restrictions. The value must be less than or equal to twenty (20) characters. The value can contain only alphanumeric characters, underscores, and hyphens. + public let queryable: Bool? + /// Whether the configuration property is a required value. + public let required: Bool + /// Whether the configuration property is secret. When updating a pipeline, passing * * * * * without changing any other values of the action preserves the previous value of the secret. + public let secret: Bool + /// The type of the configuration property. + public let type: RuleConfigurationPropertyType? + + public init(description: String? = nil, key: Bool, name: String, queryable: Bool? = nil, required: Bool, secret: Bool, type: RuleConfigurationPropertyType? = nil) { + self.description = description + self.key = key + self.name = name + self.queryable = queryable + self.required = required + self.secret = secret + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case key = "key" + case name = "name" + case queryable = "queryable" + case required = "required" + case secret = "secret" + case type = "type" + } + } + + public struct RuleDeclaration: AWSEncodableShape & AWSDecodableShape { + /// The action configuration fields for the rule. + public let configuration: [String: String]? + /// The input artifacts fields for the rule, such as specifying an input file for the rule. + public let inputArtifacts: [InputArtifact]? + /// The name of the rule that is created for the condition, such as CheckAllResults. + public let name: String + /// The Region for the condition associated with the rule. + public let region: String? + /// The pipeline role ARN associated with the rule. + public let roleArn: String? 
+ /// The ID for the rule type, which is made up of the combined values for category, owner, provider, and version. + public let ruleTypeId: RuleTypeId + /// The action timeout for the rule. + public let timeoutInMinutes: Int? + + public init(configuration: [String: String]? = nil, inputArtifacts: [InputArtifact]? = nil, name: String, region: String? = nil, roleArn: String? = nil, ruleTypeId: RuleTypeId, timeoutInMinutes: Int? = nil) { + self.configuration = configuration + self.inputArtifacts = inputArtifacts + self.name = name + self.region = region + self.roleArn = roleArn + self.ruleTypeId = ruleTypeId + self.timeoutInMinutes = timeoutInMinutes + } + + public func validate(name: String) throws { + try self.configuration?.forEach { + try validate($0.key, name: "configuration.key", parent: name, max: 50) + try validate($0.key, name: "configuration.key", parent: name, min: 1) + try validate($0.value, name: "configuration[\"\($0.key)\"]", parent: name, max: 10000) + try validate($0.value, name: "configuration[\"\($0.key)\"]", parent: name, min: 1) + } + try self.validate(self.configuration, name: "configuration", parent: name, max: 200) + try self.inputArtifacts?.forEach { + try $0.validate(name: "\(name).inputArtifacts[]") + } + try self.validate(self.name, name: "name", parent: name, max: 100) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9.@\\-_]+$") + try self.validate(self.region, name: "region", parent: name, max: 30) + try self.validate(self.region, name: "region", parent: name, min: 4) + try self.validate(self.roleArn, name: "roleArn", parent: name, max: 1024) + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:aws(-[\\w]+)*:iam::[0-9]{12}:role/") + try self.ruleTypeId.validate(name: "\(name).ruleTypeId") + try self.validate(self.timeoutInMinutes, name: "timeoutInMinutes", parent: name, max: 86400) + try 
self.validate(self.timeoutInMinutes, name: "timeoutInMinutes", parent: name, min: 5) + } + + private enum CodingKeys: String, CodingKey { + case configuration = "configuration" + case inputArtifacts = "inputArtifacts" + case name = "name" + case region = "region" + case roleArn = "roleArn" + case ruleTypeId = "ruleTypeId" + case timeoutInMinutes = "timeoutInMinutes" + } + } + + public struct RuleExecution: AWSDecodableShape { + public let errorDetails: ErrorDetails? + /// The external ID of the run of the rule. + public let externalExecutionId: String? + /// The URL of a resource external to Amazon Web Services that is used when running the rule (for example, an external repository URL). + public let externalExecutionUrl: String? + /// The last status change of the rule. + public let lastStatusChange: Date? + /// The ARN of the user who last changed the rule. + public let lastUpdatedBy: String? + /// The execution ID for the run of the rule. + public let ruleExecutionId: String? + /// The status of the run of the rule, such as FAILED. + public let status: RuleExecutionStatus? + /// A summary of the run of the rule. + public let summary: String? + /// The system-generated token used to identify a unique request. + public let token: String? + + public init(errorDetails: ErrorDetails? = nil, externalExecutionId: String? = nil, externalExecutionUrl: String? = nil, lastStatusChange: Date? = nil, lastUpdatedBy: String? = nil, ruleExecutionId: String? = nil, status: RuleExecutionStatus? = nil, summary: String? = nil, token: String? 
= nil) { + self.errorDetails = errorDetails + self.externalExecutionId = externalExecutionId + self.externalExecutionUrl = externalExecutionUrl + self.lastStatusChange = lastStatusChange + self.lastUpdatedBy = lastUpdatedBy + self.ruleExecutionId = ruleExecutionId + self.status = status + self.summary = summary + self.token = token + } + + private enum CodingKeys: String, CodingKey { + case errorDetails = "errorDetails" + case externalExecutionId = "externalExecutionId" + case externalExecutionUrl = "externalExecutionUrl" + case lastStatusChange = "lastStatusChange" + case lastUpdatedBy = "lastUpdatedBy" + case ruleExecutionId = "ruleExecutionId" + case status = "status" + case summary = "summary" + case token = "token" + } + } + + public struct RuleExecutionDetail: AWSDecodableShape { + /// Input details for the rule execution, such as role ARN, Region, and input artifacts. + public let input: RuleExecutionInput? + /// The date and time of the last change to the rule execution, in timestamp format. + public let lastUpdateTime: Date? + /// Output details for the rule execution, such as the rule execution result. + public let output: RuleExecutionOutput? + /// The ID of the pipeline execution in the stage where the rule was run. Use the GetPipelineState action to retrieve the current pipelineExecutionId of the stage. + public let pipelineExecutionId: String? + /// The version number of the pipeline with the stage where the rule was run. + public let pipelineVersion: Int? + /// The ID of the run for the rule. + public let ruleExecutionId: String? + /// The name of the rule that was run in the stage. + public let ruleName: String? + /// The name of the stage where the rule was run. + public let stageName: String? + /// The start time of the rule execution. + public let startTime: Date? + /// The status of the rule execution. Status categories are InProgress, Succeeded, and Failed. + public let status: RuleExecutionStatus? 
+ /// The ARN of the user who changed the rule execution details. + public let updatedBy: String? + + public init(input: RuleExecutionInput? = nil, lastUpdateTime: Date? = nil, output: RuleExecutionOutput? = nil, pipelineExecutionId: String? = nil, pipelineVersion: Int? = nil, ruleExecutionId: String? = nil, ruleName: String? = nil, stageName: String? = nil, startTime: Date? = nil, status: RuleExecutionStatus? = nil, updatedBy: String? = nil) { + self.input = input + self.lastUpdateTime = lastUpdateTime + self.output = output + self.pipelineExecutionId = pipelineExecutionId + self.pipelineVersion = pipelineVersion + self.ruleExecutionId = ruleExecutionId + self.ruleName = ruleName + self.stageName = stageName + self.startTime = startTime + self.status = status + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case input = "input" + case lastUpdateTime = "lastUpdateTime" + case output = "output" + case pipelineExecutionId = "pipelineExecutionId" + case pipelineVersion = "pipelineVersion" + case ruleExecutionId = "ruleExecutionId" + case ruleName = "ruleName" + case stageName = "stageName" + case startTime = "startTime" + case status = "status" + case updatedBy = "updatedBy" + } + } + + public struct RuleExecutionFilter: AWSEncodableShape { + public let latestInPipelineExecution: LatestInPipelineExecutionFilter? + /// The pipeline execution ID used to filter rule execution history. + public let pipelineExecutionId: String? + + public init(latestInPipelineExecution: LatestInPipelineExecutionFilter? = nil, pipelineExecutionId: String? 
= nil) { + self.latestInPipelineExecution = latestInPipelineExecution + self.pipelineExecutionId = pipelineExecutionId + } + + public func validate(name: String) throws { + try self.latestInPipelineExecution?.validate(name: "\(name).latestInPipelineExecution") + try self.validate(self.pipelineExecutionId, name: "pipelineExecutionId", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case latestInPipelineExecution = "latestInPipelineExecution" + case pipelineExecutionId = "pipelineExecutionId" + } + } + + public struct RuleExecutionInput: AWSDecodableShape { + /// Configuration data for a rule execution, such as the resolved values for that run. + public let configuration: [String: String]? + /// Details of input artifacts of the rule that correspond to the rule execution. + public let inputArtifacts: [ArtifactDetail]? + /// The Amazon Web Services Region for the rule, such as us-east-1. + public let region: String? + /// Configuration data for a rule execution with all variable references replaced with their real values for the execution. + public let resolvedConfiguration: [String: String]? + /// The ARN of the IAM service role that performs the declared rule. This is assumed through the roleArn for the pipeline. + public let roleArn: String? + /// The ID for the rule type, which is made up of the combined values for category, owner, provider, and version. + public let ruleTypeId: RuleTypeId? + + public init(configuration: [String: String]? = nil, inputArtifacts: [ArtifactDetail]? = nil, region: String? = nil, resolvedConfiguration: [String: String]? = nil, roleArn: String? = nil, ruleTypeId: RuleTypeId? 
= nil) { + self.configuration = configuration + self.inputArtifacts = inputArtifacts + self.region = region + self.resolvedConfiguration = resolvedConfiguration + self.roleArn = roleArn + self.ruleTypeId = ruleTypeId + } + + private enum CodingKeys: String, CodingKey { + case configuration = "configuration" + case inputArtifacts = "inputArtifacts" + case region = "region" + case resolvedConfiguration = "resolvedConfiguration" + case roleArn = "roleArn" + case ruleTypeId = "ruleTypeId" + } + } + + public struct RuleExecutionOutput: AWSDecodableShape { + /// Execution result information listed in the output details for a rule execution. + public let executionResult: RuleExecutionResult? + + public init(executionResult: RuleExecutionResult? = nil) { + self.executionResult = executionResult + } + + private enum CodingKeys: String, CodingKey { + case executionResult = "executionResult" + } + } + + public struct RuleExecutionResult: AWSDecodableShape { + public let errorDetails: ErrorDetails? + /// The external ID for the rule execution. + public let externalExecutionId: String? + /// The external provider summary for the rule execution. + public let externalExecutionSummary: String? + /// The deepest external link to the external resource (for example, a repository URL or deployment endpoint) that is used when running the rule. + public let externalExecutionUrl: String? + + public init(errorDetails: ErrorDetails? = nil, externalExecutionId: String? = nil, externalExecutionSummary: String? = nil, externalExecutionUrl: String? 
= nil) { + self.errorDetails = errorDetails + self.externalExecutionId = externalExecutionId + self.externalExecutionSummary = externalExecutionSummary + self.externalExecutionUrl = externalExecutionUrl + } + + private enum CodingKeys: String, CodingKey { + case errorDetails = "errorDetails" + case externalExecutionId = "externalExecutionId" + case externalExecutionSummary = "externalExecutionSummary" + case externalExecutionUrl = "externalExecutionUrl" + } + } + + public struct RuleRevision: AWSDecodableShape { + /// The date and time when the most recent version of the rule was created, in timestamp format. + public let created: Date + /// The unique identifier of the change that set the state to this revision (for example, a deployment ID or timestamp). + public let revisionChangeId: String + /// The system-generated unique ID that identifies the revision number of the rule. + public let revisionId: String + + public init(created: Date, revisionChangeId: String, revisionId: String) { + self.created = created + self.revisionChangeId = revisionChangeId + self.revisionId = revisionId + } + + private enum CodingKeys: String, CodingKey { + case created = "created" + case revisionChangeId = "revisionChangeId" + case revisionId = "revisionId" + } + } + + public struct RuleState: AWSDecodableShape { + /// The ID of the current revision of the artifact successfully worked on by the job. + public let currentRevision: RuleRevision? + /// A URL link for more information about the state of the action, such as a details page. + public let entityUrl: String? + /// Represents information about the latest run of a rule. + public let latestExecution: RuleExecution? + /// A URL link for more information about the revision, such as a commit details page. + public let revisionUrl: String? + /// The name of the rule. + public let ruleName: String? + + public init(currentRevision: RuleRevision? = nil, entityUrl: String? = nil, latestExecution: RuleExecution? 
= nil, revisionUrl: String? = nil, ruleName: String? = nil) { + self.currentRevision = currentRevision + self.entityUrl = entityUrl + self.latestExecution = latestExecution + self.revisionUrl = revisionUrl + self.ruleName = ruleName + } + + private enum CodingKeys: String, CodingKey { + case currentRevision = "currentRevision" + case entityUrl = "entityUrl" + case latestExecution = "latestExecution" + case revisionUrl = "revisionUrl" + case ruleName = "ruleName" + } + } + + public struct RuleType: AWSDecodableShape { + /// Represents information about a rule type. + public let id: RuleTypeId + public let inputArtifactDetails: ArtifactDetails + /// The configuration properties for the rule type. + public let ruleConfigurationProperties: [RuleConfigurationProperty]? + /// Returns information about the settings for a rule type. + public let settings: RuleTypeSettings? + + public init(id: RuleTypeId, inputArtifactDetails: ArtifactDetails, ruleConfigurationProperties: [RuleConfigurationProperty]? = nil, settings: RuleTypeSettings? = nil) { + self.id = id + self.inputArtifactDetails = inputArtifactDetails + self.ruleConfigurationProperties = ruleConfigurationProperties + self.settings = settings + } + + private enum CodingKeys: String, CodingKey { + case id = "id" + case inputArtifactDetails = "inputArtifactDetails" + case ruleConfigurationProperties = "ruleConfigurationProperties" + case settings = "settings" + } + } + + public struct RuleTypeId: AWSEncodableShape & AWSDecodableShape { + /// A category defines what kind of rule can be run in the stage, and constrains the provider type for the rule. Valid categories are limited to one of the following values. INVOKE Approval Rule + public let category: RuleCategory + /// The creator of the rule being called. The valid value for the Owner field in the rule category is AWS. + public let owner: RuleOwner? + /// The provider of the service being called by the rule. Valid providers are determined by the rule category. 
For example, a managed rule in the Rule category type has an owner of AWS, which would be specified as AWS. + public let provider: String + /// A string that describes the rule version. + public let version: String? + + public init(category: RuleCategory, owner: RuleOwner? = nil, provider: String, version: String? = nil) { + self.category = category + self.owner = owner + self.provider = provider + self.version = version + } + + public func validate(name: String) throws { + try self.validate(self.provider, name: "provider", parent: name, max: 35) + try self.validate(self.provider, name: "provider", parent: name, min: 1) + try self.validate(self.provider, name: "provider", parent: name, pattern: "^[0-9A-Za-z_-]+$") + try self.validate(self.version, name: "version", parent: name, max: 9) + try self.validate(self.version, name: "version", parent: name, min: 1) + try self.validate(self.version, name: "version", parent: name, pattern: "^[0-9A-Za-z_-]+$") + } + + private enum CodingKeys: String, CodingKey { + case category = "category" + case owner = "owner" + case provider = "provider" + case version = "version" + } + } + + public struct RuleTypeSettings: AWSDecodableShape { + /// The URL returned to the CodePipeline console that provides a deep link to the resources of the external system, such as the configuration page for a CodeDeploy deployment group. This link is provided as part of the action display in the pipeline. + public let entityUrlTemplate: String? + /// The URL returned to the CodePipeline console that contains a link to the top-level landing page for the external system, such as the console page for CodeDeploy. This link is shown on the pipeline view page in the CodePipeline console and provides a link to the execution entity of the external action. + public let executionUrlTemplate: String? + /// The URL returned to the CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action. 
+ public let revisionUrlTemplate: String? + /// The URL of a sign-up page where users can sign up for an external service and perform initial configuration of the action provided by that service. + public let thirdPartyConfigurationUrl: String? + + public init(entityUrlTemplate: String? = nil, executionUrlTemplate: String? = nil, revisionUrlTemplate: String? = nil, thirdPartyConfigurationUrl: String? = nil) { + self.entityUrlTemplate = entityUrlTemplate + self.executionUrlTemplate = executionUrlTemplate + self.revisionUrlTemplate = revisionUrlTemplate + self.thirdPartyConfigurationUrl = thirdPartyConfigurationUrl + } + + private enum CodingKeys: String, CodingKey { + case entityUrlTemplate = "entityUrlTemplate" + case executionUrlTemplate = "executionUrlTemplate" + case revisionUrlTemplate = "revisionUrlTemplate" + case thirdPartyConfigurationUrl = "thirdPartyConfigurationUrl" + } + } + public struct S3ArtifactLocation: AWSDecodableShape { /// The name of the S3 bucket. public let bucketName: String @@ -3623,6 +4303,40 @@ extension CodePipeline { } } + public struct StageConditionState: AWSDecodableShape { + /// The states of the conditions for a run of a condition for a stage. + public let conditionStates: [ConditionState]? + /// Represents information about the latest run of a condition for a stage. + public let latestExecution: StageConditionsExecution? + + public init(conditionStates: [ConditionState]? = nil, latestExecution: StageConditionsExecution? = nil) { + self.conditionStates = conditionStates + self.latestExecution = latestExecution + } + + private enum CodingKeys: String, CodingKey { + case conditionStates = "conditionStates" + case latestExecution = "latestExecution" + } + } + + public struct StageConditionsExecution: AWSDecodableShape { + /// The status of a run of a condition for a stage. + public let status: ConditionExecutionStatus? + /// A summary of the run of the condition for a stage. + public let summary: String? 
+ + public init(status: ConditionExecutionStatus? = nil, summary: String? = nil) { + self.status = status + self.summary = summary + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + case summary = "summary" + } + } + public struct StageContext: AWSDecodableShape { /// The name of the stage. public let name: String? @@ -3639,37 +4353,48 @@ extension CodePipeline { public struct StageDeclaration: AWSEncodableShape & AWSDecodableShape { /// The actions included in a stage. public let actions: [ActionDeclaration] + /// The method to use when a stage allows entry. For example, configuring this field for conditions will allow entry to the stage when the conditions are met. + public let beforeEntry: BeforeEntryConditions? /// Reserved for future use. public let blockers: [BlockerDeclaration]? /// The name of the stage. public let name: String /// The method to use when a stage has not completed successfully. For example, configuring this field for rollback will roll back a failed stage automatically to the last successful pipeline execution in the stage. public let onFailure: FailureConditions? + /// The method to use when a stage has succeeded. For example, configuring this field for conditions will allow the stage to succeed when the conditions are met. + public let onSuccess: SuccessConditions? - public init(actions: [ActionDeclaration], blockers: [BlockerDeclaration]? = nil, name: String, onFailure: FailureConditions? = nil) { + public init(actions: [ActionDeclaration], beforeEntry: BeforeEntryConditions? = nil, blockers: [BlockerDeclaration]? = nil, name: String, onFailure: FailureConditions? = nil, onSuccess: SuccessConditions? 
= nil) { self.actions = actions + self.beforeEntry = beforeEntry self.blockers = blockers self.name = name self.onFailure = onFailure + self.onSuccess = onSuccess } public func validate(name: String) throws { try self.actions.forEach { try $0.validate(name: "\(name).actions[]") } + try self.beforeEntry?.validate(name: "\(name).beforeEntry") try self.blockers?.forEach { try $0.validate(name: "\(name).blockers[]") } try self.validate(self.name, name: "name", parent: name, max: 100) try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9.@\\-_]+$") + try self.onFailure?.validate(name: "\(name).onFailure") + try self.onSuccess?.validate(name: "\(name).onSuccess") } private enum CodingKeys: String, CodingKey { case actions = "actions" + case beforeEntry = "beforeEntry" case blockers = "blockers" case name = "name" case onFailure = "onFailure" + case onSuccess = "onSuccess" } } @@ -3697,6 +4422,8 @@ extension CodePipeline { public struct StageState: AWSDecodableShape { /// The state of the stage. public let actionStates: [ActionState]? + /// The state of the entry conditions for a stage. + public let beforeEntryConditionState: StageConditionState? public let inboundExecution: StageExecution? /// The inbound executions for a stage. public let inboundExecutions: [StageExecution]? @@ -3704,24 +4431,34 @@ extension CodePipeline { public let inboundTransitionState: TransitionState? /// Information about the latest execution in the stage, including its ID and status. public let latestExecution: StageExecution? + /// The state of the failure conditions for a stage. + public let onFailureConditionState: StageConditionState? + /// The state of the success conditions for a stage. + public let onSuccessConditionState: StageConditionState? /// The name of the stage. public let stageName: String? - public init(actionStates: [ActionState]? = nil, inboundExecution: StageExecution? 
= nil, inboundExecutions: [StageExecution]? = nil, inboundTransitionState: TransitionState? = nil, latestExecution: StageExecution? = nil, stageName: String? = nil) { + public init(actionStates: [ActionState]? = nil, beforeEntryConditionState: StageConditionState? = nil, inboundExecution: StageExecution? = nil, inboundExecutions: [StageExecution]? = nil, inboundTransitionState: TransitionState? = nil, latestExecution: StageExecution? = nil, onFailureConditionState: StageConditionState? = nil, onSuccessConditionState: StageConditionState? = nil, stageName: String? = nil) { self.actionStates = actionStates + self.beforeEntryConditionState = beforeEntryConditionState self.inboundExecution = inboundExecution self.inboundExecutions = inboundExecutions self.inboundTransitionState = inboundTransitionState self.latestExecution = latestExecution + self.onFailureConditionState = onFailureConditionState + self.onSuccessConditionState = onSuccessConditionState self.stageName = stageName } private enum CodingKeys: String, CodingKey { case actionStates = "actionStates" + case beforeEntryConditionState = "beforeEntryConditionState" case inboundExecution = "inboundExecution" case inboundExecutions = "inboundExecutions" case inboundTransitionState = "inboundTransitionState" case latestExecution = "latestExecution" + case onFailureConditionState = "onFailureConditionState" + case onSuccessConditionState = "onSuccessConditionState" case stageName = "stageName" } } @@ -3860,6 +4597,27 @@ extension CodePipeline { } } + public struct SuccessConditions: AWSEncodableShape & AWSDecodableShape { + /// The conditions that are success conditions. 
+ public let conditions: [Condition] + + public init(conditions: [Condition]) { + self.conditions = conditions + } + + public func validate(name: String) throws { + try self.conditions.forEach { + try $0.validate(name: "\(name).conditions[]") + } + try self.validate(self.conditions, name: "conditions", parent: name, max: 1) + try self.validate(self.conditions, name: "conditions", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case conditions = "conditions" + } + } + public struct Tag: AWSEncodableShape & AWSDecodableShape { /// The tag's key. public let key: String @@ -4200,6 +4958,7 @@ public struct CodePipelineErrorType: AWSErrorType { case approvalAlreadyCompletedException = "ApprovalAlreadyCompletedException" case concurrentModificationException = "ConcurrentModificationException" case concurrentPipelineExecutionsLimitExceededException = "ConcurrentPipelineExecutionsLimitExceededException" + case conditionNotOverridableException = "ConditionNotOverridableException" case conflictException = "ConflictException" case duplicatedStopRequestException = "DuplicatedStopRequestException" case invalidActionDeclarationException = "InvalidActionDeclarationException" @@ -4264,6 +5023,8 @@ public struct CodePipelineErrorType: AWSErrorType { public static var concurrentModificationException: Self { .init(.concurrentModificationException) } /// The pipeline has reached the limit for concurrent pipeline executions. public static var concurrentPipelineExecutionsLimitExceededException: Self { .init(.concurrentPipelineExecutionsLimitExceededException) } + /// Unable to override because the condition does not allow overrides. + public static var conditionNotOverridableException: Self { .init(.conditionNotOverridableException) } /// Your request cannot be handled because the pipeline is busy handling ongoing activities. Try again later. 
public static var conflictException: Self { .init(.conflictException) } /// The pipeline execution is already in a Stopping state. If you already chose to stop and wait, you cannot make that request again. You can choose to stop and abandon now, but be aware that this option can lead to failed tasks or out of sequence tasks. If you already chose to stop and abandon, you cannot make that request again. diff --git a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift index f806e627c5..791004736d 100644 --- a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift +++ b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift @@ -112,7 +112,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// This IAM-authenticated API operation provides a code that Amazon Cognito sent to your user when they signed up in your user pool. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message. Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users confirm their accounts when they respond to their invitation email message and choose a password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// This IAM-authenticated API operation confirms user sign-up as an administrator. Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation. No confirmation code is required. This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can configure your user pool to not send confirmation codes to new users and instead confirm them with this API operation on the back end. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable public func adminConfirmSignUp(_ input: AdminConfirmSignUpRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminConfirmSignUpResponse { return try await self.client.execute( @@ -125,7 +125,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Creates a new user in the specified user pool. If MessageAction isn't set, the default is to send a welcome message via email or phone (SMS). This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. 
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password. Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email. In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Creates a new user in the specified user pool. If MessageAction isn't set, the default is to send a welcome message via email or phone (SMS). This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. 
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password. Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email. In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable public func adminCreateUser(_ input: AdminCreateUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminCreateUserResponse { return try await self.client.execute( @@ -242,7 +242,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Initiates the authentication flow, as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. 
Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Initiates the authentication flow, as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. 
For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable public func adminInitiateAuth(_ input: AdminInitiateAuthRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminInitiateAuthResponse { return try await self.client.execute( @@ -320,7 +320,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Resets the specified user's password in a user pool as an administrator. Works on any user. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. 
Deactivates a user's password, requiring them to change it. If a user tries to sign in after the API is called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then perform the actions that reset your user's password: the forgot-password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Resets the specified user's password in a user pool as an administrator. Works on any user. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. 
After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Deactivates a user's password, requiring them to change it. If a user tries to sign in after the API is called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then perform the actions that reset your user's password: the forgot-password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable public func adminResetUserPassword(_ input: AdminResetUserPasswordRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminResetUserPasswordResponse { return try await self.client.execute( @@ -333,7 +333,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. An AdminRespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge. For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers. 
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. An AdminRespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge. For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers. This action might generate an SMS text message. 
Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable public func adminRespondToAuthChallenge(_ input: AdminRespondToAuthChallengeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminRespondToAuthChallengeResponse { return try await self.client.execute( @@ -411,7 +411,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. 
Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user. To delete an attribute from your user, submit the attribute in your API request with a blank value. For custom attributes, you must prepend the custom: prefix to the attribute name. In addition to updating user attributes, this API can also be used to mark phone and email as verified. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. 
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user. To delete an attribute from your user, submit the attribute in your API request with a blank value. For custom attributes, you must prepend the custom: prefix to the attribute name. In addition to updating user attributes, this API can also be used to mark phone and email as verified. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable public func adminUpdateUserAttributes(_ input: AdminUpdateUserAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminUpdateUserAttributesResponse { return try await self.client.execute( @@ -437,7 +437,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito. 
Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs. Complete setup with AssociateSoftwareToken and VerifySoftwareToken. After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito. Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken. 
After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. @Sendable public func associateSoftwareToken(_ input: AssociateSoftwareTokenRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateSoftwareTokenResponse { return try await self.client.execute( @@ -554,7 +554,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Creates a new Amazon Cognito user pool and sets the password policy for the pool. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Creates a new Amazon Cognito user pool and sets the password policy for the pool. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable public func createUserPool(_ input: CreateUserPoolRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateUserPoolResponse { return try await self.client.execute( @@ -801,7 +801,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword. If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. 
Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. + /// Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword. If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. 
Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. @Sendable public func forgotPassword(_ input: ForgotPasswordRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ForgotPasswordResponse { return try await self.client.execute( @@ -866,7 +866,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Gets the detailed activity logging configuration for a user pool. + /// Gets the logging configuration of a user pool. @Sendable public func getLogDeliveryConfiguration(_ input: GetLogDeliveryConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetLogDeliveryConfigurationResponse { return try await self.client.execute( @@ -918,7 +918,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. 
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. + /// Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. 
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. @Sendable public func getUserAttributeVerificationCode(_ input: GetUserAttributeVerificationCodeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetUserAttributeVerificationCodeResponse { return try await self.client.execute( @@ -957,7 +957,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. 
Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. + /// Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. 
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. @Sendable public func initiateAuth(_ input: InitiateAuthRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InitiateAuthResponse { return try await self.client.execute( @@ -1100,7 +1100,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Resends the confirmation (for confirmation of registration) to a specific user in the user pool. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. 
After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. + /// Resends the confirmation (for confirmation of registration) to a specific user in the user pool. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. 
@Sendable public func resendConfirmationCode(_ input: ResendConfirmationCodeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ResendConfirmationCodeResponse { return try await self.client.execute( @@ -1113,7 +1113,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge. For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. 
After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. + /// Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge. For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. 
For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. @Sendable public func respondToAuthChallenge(_ input: RespondToAuthChallengeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RespondToAuthChallengeResponse { return try await self.client.execute( @@ -1139,7 +1139,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Sets up or modifies the detailed activity logging configuration of a user pool. + /// Sets up or modifies the logging configuration of a user pool. User pools can export user notification logs and advanced security features user activity logs. @Sendable public func setLogDeliveryConfiguration(_ input: SetLogDeliveryConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SetLogDeliveryConfigurationResponse { return try await self.client.execute( @@ -1191,7 +1191,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Sets the user pool multi-factor authentication (MFA) configuration. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. 
For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. + /// Sets the user pool multi-factor authentication (MFA) configuration. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. @Sendable public func setUserPoolMfaConfig(_ input: SetUserPoolMfaConfigRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SetUserPoolMfaConfigResponse { return try await self.client.execute( @@ -1217,7 +1217,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// Registers the user in the specified user pool and creates a user name, password, and user attributes. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. 
Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. + /// Registers the user in the specified user pool and creates a user name, password, and user attributes. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. 
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. @Sendable public func signUp(_ input: SignUpRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SignUpResponse { return try await self.client.execute( @@ -1347,7 +1347,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. Custom attribute values in this request must include the custom: prefix. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. 
Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. + /// With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. Custom attribute values in this request must include the custom: prefix. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. 
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. @Sendable public func updateUserAttributes(_ input: UpdateUserAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateUserAttributesResponse { return try await self.client.execute( @@ -1360,7 +1360,7 @@ public struct CognitoIdentityProvider: AWSService { ) } - /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable public func updateUserPool(_ input: UpdateUserPoolRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateUserPoolResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_shapes.swift b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_shapes.swift index 4b2d0ad320..9ff313dfe1 100644 --- a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_shapes.swift +++ b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_shapes.swift @@ -34,6 +34,12 @@ extension CognitoIdentityProvider { public var description: String { return self.rawValue } } + public enum AdvancedSecurityEnabledModeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case audit = "AUDIT" + case enforced = "ENFORCED" + public var description: String { return self.rawValue } + } + public enum AdvancedSecurityModeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case audit = "AUDIT" case enforced = "ENFORCED" @@ -163,6 +169,7 @@ extension CognitoIdentityProvider { } public enum EventSourceName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case userAuthEvents = "userAuthEvents" case userNotification = "userNotification" public var description: String { return self.rawValue } } @@ -206,6 +213,7 @@ extension CognitoIdentityProvider { public enum LogLevel: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case error = "ERROR" + case info = "INFO" public var description: String { return self.rawValue } } @@ -1644,6 +1652,19 @@ extension CognitoIdentityProvider { public init() {} } + public struct AdvancedSecurityAdditionalFlowsType: AWSEncodableShape & AWSDecodableShape { + /// The operating mode of advanced security 
features in custom authentication with Custom authentication challenge Lambda triggers. + public let customAuthMode: AdvancedSecurityEnabledModeType? + + public init(customAuthMode: AdvancedSecurityEnabledModeType? = nil) { + self.customAuthMode = customAuthMode + } + + private enum CodingKeys: String, CodingKey { + case customAuthMode = "CustomAuthMode" + } + } + public struct AnalyticsConfigurationType: AWSEncodableShape & AWSDecodableShape { /// The Amazon Resource Name (ARN) of an Amazon Pinpoint project. You can use the Amazon Pinpoint project to integrate with the chosen user pool Client. Amazon Cognito publishes events to the Amazon Pinpoint project that the app ARN declares. public let applicationArn: String? @@ -2471,7 +2492,7 @@ extension CognitoIdentityProvider { public let idTokenValidity: Int? /// A list of allowed logout URLs for the IdPs. public let logoutURLs: [String]? - /// Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented. + /// Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. 
Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented. Defaults to LEGACY when you don't provide a value. public let preventUserExistenceErrors: PreventUserExistenceErrorTypes? /// The list of user attributes that you want your app client to have read-only access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a GetUser API request to retrieve and display your user's profile data. When you don't specify the ReadAttributes for your app client, your app can read the values of email_verified, phone_number_verified, and the Standard attributes of your user pool. When your user pool has read access to these default attributes, ReadAttributes doesn't return any information. Amazon Cognito only populates ReadAttributes in the API response if you have specified your own custom set of read attributes. public let readAttributes: [String]? @@ -3562,6 +3583,25 @@ extension CognitoIdentityProvider { } } + public struct FirehoseConfigurationType: AWSEncodableShape & AWSDecodableShape { + /// The ARN of an Amazon Data Firehose stream that's the destination for advanced security features log export. + public let streamArn: String? + + public init(streamArn: String? 
= nil) { + self.streamArn = streamArn + } + + public func validate(name: String) throws { + try self.validate(self.streamArn, name: "streamArn", parent: name, max: 2048) + try self.validate(self.streamArn, name: "streamArn", parent: name, min: 20) + try self.validate(self.streamArn, name: "streamArn", parent: name, pattern: "^arn:[\\w+=/,.@-]+:[\\w+=/,.@-]+:([\\w+=/,.@-]*)?:[0-9]+:[\\w+=/,.@-]+(:[\\w+=/,.@-]+)?(:[\\w+=/,.@-]+)?$") + } + + private enum CodingKeys: String, CodingKey { + case streamArn = "StreamArn" + } + } + public struct ForgetDeviceRequest: AWSEncodableShape { /// A valid access token that Amazon Cognito issued to the user whose registered device you want to forget. public let accessToken: String? @@ -3805,7 +3845,7 @@ extension CognitoIdentityProvider { } public struct GetLogDeliveryConfigurationRequest: AWSEncodableShape { - /// The ID of the user pool where you want to view detailed activity logging configuration. + /// The ID of the user pool that has the logging configuration that you want to view. public let userPoolId: String public init(userPoolId: String) { @@ -3824,7 +3864,7 @@ extension CognitoIdentityProvider { } public struct GetLogDeliveryConfigurationResponse: AWSDecodableShape { - /// The detailed activity logging configuration of the requested user pool. + /// The logging configuration of the requested user pool. public let logDeliveryConfiguration: LogDeliveryConfigurationType? public init(logDeliveryConfiguration: LogDeliveryConfigurationType? = nil) { @@ -4848,34 +4888,44 @@ extension CognitoIdentityProvider { } public struct LogConfigurationType: AWSEncodableShape & AWSDecodableShape { - /// The CloudWatch logging destination of a user pool. + /// The CloudWatch log group destination of user pool detailed activity logs, or of user activity log export with advanced security features. public let cloudWatchLogsConfiguration: CloudWatchLogsConfigurationType? 
- /// The source of events that your user pool sends for detailed activity logging. + /// The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to userNotification. To send info-level logs about advanced security features user activity, set to userAuthEvents. public let eventSource: EventSourceName - /// The errorlevel selection of logs that a user pool sends for detailed activity logging. + /// The Amazon Data Firehose stream destination of user activity log export with advanced security features. To activate this setting, advanced security features must be active in your user pool. + public let firehoseConfiguration: FirehoseConfigurationType? + /// The errorlevel selection of logs that a user pool sends for detailed activity logging. To send userNotification activity with information about message delivery, choose ERROR with CloudWatchLogsConfiguration. To send userAuthEvents activity with user logs from advanced security features, choose INFO with one of CloudWatchLogsConfiguration, FirehoseConfiguration, or S3Configuration. public let logLevel: LogLevel + /// The Amazon S3 bucket destination of user activity log export with advanced security features. To activate this setting, advanced security features must be active in your user pool. + public let s3Configuration: S3ConfigurationType? - public init(cloudWatchLogsConfiguration: CloudWatchLogsConfigurationType? = nil, eventSource: EventSourceName, logLevel: LogLevel) { + public init(cloudWatchLogsConfiguration: CloudWatchLogsConfigurationType? = nil, eventSource: EventSourceName, firehoseConfiguration: FirehoseConfigurationType? = nil, logLevel: LogLevel, s3Configuration: S3ConfigurationType? 
= nil) { self.cloudWatchLogsConfiguration = cloudWatchLogsConfiguration self.eventSource = eventSource + self.firehoseConfiguration = firehoseConfiguration self.logLevel = logLevel + self.s3Configuration = s3Configuration } public func validate(name: String) throws { try self.cloudWatchLogsConfiguration?.validate(name: "\(name).cloudWatchLogsConfiguration") + try self.firehoseConfiguration?.validate(name: "\(name).firehoseConfiguration") + try self.s3Configuration?.validate(name: "\(name).s3Configuration") } private enum CodingKeys: String, CodingKey { case cloudWatchLogsConfiguration = "CloudWatchLogsConfiguration" case eventSource = "EventSource" + case firehoseConfiguration = "FirehoseConfiguration" case logLevel = "LogLevel" + case s3Configuration = "S3Configuration" } } public struct LogDeliveryConfigurationType: AWSDecodableShape { - /// The detailed activity logging destination of a user pool. + /// A logging destination of a user pool. User pools can have multiple logging destinations for message-delivery and user-activity logs. public let logConfigurations: [LogConfigurationType] - /// The ID of the user pool where you configured detailed activity logging. + /// The ID of the user pool where you configured logging. public let userPoolId: String public init(logConfigurations: [LogConfigurationType], userPoolId: String) { @@ -5064,6 +5114,8 @@ extension CognitoIdentityProvider { public struct PasswordPolicyType: AWSEncodableShape & AWSDecodableShape { /// The minimum length of the password in the policy that you have set. This value can't be less than 6. public let minimumLength: Int? + /// The number of previous passwords that you want Amazon Cognito to restrict each user from reusing. Users can't set a password that matches any of n previous passwords, where n is the value of PasswordHistorySize. Password history isn't enforced and isn't displayed in DescribeUserPool responses when you set this value to 0 or don't provide it. 
To activate this setting, advanced security features must be active in your user pool. + public let passwordHistorySize: Int? /// In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password. public let requireLowercase: Bool? /// In the password policy that you have set, refers to whether you have required users to use at least one number in their password. @@ -5075,8 +5127,9 @@ extension CognitoIdentityProvider { /// The number of days a temporary password is valid in the password policy. If the user doesn't sign in during this time, an administrator must reset their password. Defaults to 7. If you submit a value of 0, Amazon Cognito treats it as a null value and sets TemporaryPasswordValidityDays to its default value. When you set TemporaryPasswordValidityDays for a user pool, you can no longer set a value for the legacy UnusedAccountValidityDays parameter in that user pool. public let temporaryPasswordValidityDays: Int? - public init(minimumLength: Int? = nil, requireLowercase: Bool? = nil, requireNumbers: Bool? = nil, requireSymbols: Bool? = nil, requireUppercase: Bool? = nil, temporaryPasswordValidityDays: Int? = nil) { + public init(minimumLength: Int? = nil, passwordHistorySize: Int? = nil, requireLowercase: Bool? = nil, requireNumbers: Bool? = nil, requireSymbols: Bool? = nil, requireUppercase: Bool? = nil, temporaryPasswordValidityDays: Int? 
= nil) { self.minimumLength = minimumLength + self.passwordHistorySize = passwordHistorySize self.requireLowercase = requireLowercase self.requireNumbers = requireNumbers self.requireSymbols = requireSymbols @@ -5087,12 +5140,15 @@ extension CognitoIdentityProvider { public func validate(name: String) throws { try self.validate(self.minimumLength, name: "minimumLength", parent: name, max: 99) try self.validate(self.minimumLength, name: "minimumLength", parent: name, min: 6) + try self.validate(self.passwordHistorySize, name: "passwordHistorySize", parent: name, max: 24) + try self.validate(self.passwordHistorySize, name: "passwordHistorySize", parent: name, min: 0) try self.validate(self.temporaryPasswordValidityDays, name: "temporaryPasswordValidityDays", parent: name, max: 365) try self.validate(self.temporaryPasswordValidityDays, name: "temporaryPasswordValidityDays", parent: name, min: 0) } private enum CodingKeys: String, CodingKey { case minimumLength = "MinimumLength" + case passwordHistorySize = "PasswordHistorySize" case requireLowercase = "RequireLowercase" case requireNumbers = "RequireNumbers" case requireSymbols = "RequireSymbols" @@ -5500,6 +5556,25 @@ extension CognitoIdentityProvider { } } + public struct S3ConfigurationType: AWSEncodableShape & AWSDecodableShape { + /// The ARN of an Amazon S3 bucket that's the destination for advanced security features log export. + public let bucketArn: String? + + public init(bucketArn: String? 
= nil) { + self.bucketArn = bucketArn + } + + public func validate(name: String) throws { + try self.validate(self.bucketArn, name: "bucketArn", parent: name, max: 1024) + try self.validate(self.bucketArn, name: "bucketArn", parent: name, min: 3) + try self.validate(self.bucketArn, name: "bucketArn", parent: name, pattern: "^arn:[\\w+=/,.@-]+:[\\w+=/,.@-]+:::[\\w+=/,.@-]+(:[\\w+=/,.@-]+)?(:[\\w+=/,.@-]+)?$") + } + + private enum CodingKeys: String, CodingKey { + case bucketArn = "BucketArn" + } + } + public struct SMSMfaSettingsType: AWSEncodableShape { /// Specifies whether SMS text message MFA is activated. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts, unless device tracking is turned on and the device has been trusted. public let enabled: Bool? @@ -5563,9 +5638,9 @@ extension CognitoIdentityProvider { } public struct SetLogDeliveryConfigurationRequest: AWSEncodableShape { - /// A collection of all of the detailed activity logging configurations for a user pool. + /// A collection of the logging configurations for a user pool. public let logConfigurations: [LogConfigurationType] - /// The ID of the user pool where you want to configure detailed activity logging . + /// The ID of the user pool where you want to configure logging. 
public let userPoolId: String public init(logConfigurations: [LogConfigurationType], userPoolId: String) { @@ -5577,7 +5652,7 @@ extension CognitoIdentityProvider { try self.logConfigurations.forEach { try $0.validate(name: "\(name).logConfigurations[]") } - try self.validate(self.logConfigurations, name: "logConfigurations", parent: name, max: 1) + try self.validate(self.logConfigurations, name: "logConfigurations", parent: name, max: 2) try self.validate(self.userPoolId, name: "userPoolId", parent: name, max: 55) try self.validate(self.userPoolId, name: "userPoolId", parent: name, min: 1) try self.validate(self.userPoolId, name: "userPoolId", parent: name, pattern: "^[\\w-]+_[0-9a-zA-Z]+$") @@ -5893,7 +5968,7 @@ extension CognitoIdentityProvider { public let codeDeliveryDetails: CodeDeliveryDetailsType? /// A response from the server indicating that a user registration has been confirmed. public let userConfirmed: Bool - /// The UUID of the authenticated user. This isn't the same as username. + /// The 128-bit ID of the authenticated user. This isn't the same as username. public let userSub: String public init(codeDeliveryDetails: CodeDeliveryDetailsType? = nil, userConfirmed: Bool, userSub: String) { @@ -6578,7 +6653,7 @@ extension CognitoIdentityProvider { public let idTokenValidity: Int? /// A list of allowed logout URLs for the IdPs. public let logoutURLs: [String]? - /// Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool. 
Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented. + /// Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented. Defaults to LEGACY when you don't provide a value. public let preventUserExistenceErrors: PreventUserExistenceErrorTypes? /// The list of user attributes that you want your app client to have read-only access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a GetUser API request to retrieve and display your user's profile data. When you don't specify the ReadAttributes for your app client, your app can read the values of email_verified, phone_number_verified, and the Standard attributes of your user pool. When your user pool has read access to these default attributes, ReadAttributes doesn't return any information. Amazon Cognito only populates ReadAttributes in the API response if you have specified your own custom set of read attributes. public let readAttributes: [String]? 
@@ -6991,14 +7066,18 @@ extension CognitoIdentityProvider { } public struct UserPoolAddOnsType: AWSEncodableShape & AWSDecodableShape { - /// The operating mode of advanced security features in your user pool. + /// Advanced security configuration options for additional authentication types in your user pool, including custom authentication. + public let advancedSecurityAdditionalFlows: AdvancedSecurityAdditionalFlowsType? + /// The operating mode of advanced security features for standard authentication types in your user pool, including username-password and secure remote password (SRP) authentication. public let advancedSecurityMode: AdvancedSecurityModeType - public init(advancedSecurityMode: AdvancedSecurityModeType) { + public init(advancedSecurityAdditionalFlows: AdvancedSecurityAdditionalFlowsType? = nil, advancedSecurityMode: AdvancedSecurityModeType) { + self.advancedSecurityAdditionalFlows = advancedSecurityAdditionalFlows self.advancedSecurityMode = advancedSecurityMode } private enum CodingKeys: String, CodingKey { + case advancedSecurityAdditionalFlows = "AdvancedSecurityAdditionalFlows" case advancedSecurityMode = "AdvancedSecurityMode" } } @@ -7087,7 +7166,7 @@ extension CognitoIdentityProvider { public let lastModifiedDate: Date? /// A list of allowed logout URLs for the IdPs. public let logoutURLs: [String]? - /// Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. 
LEGACY - This represents the old behavior of Amazon Cognito where user existence related errors aren't prevented. + /// Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented. Defaults to LEGACY when you don't provide a value. public let preventUserExistenceErrors: PreventUserExistenceErrorTypes? /// The list of user attributes that you want your app client to have read-only access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a GetUser API request to retrieve and display your user's profile data. When you don't specify the ReadAttributes for your app client, your app can read the values of email_verified, phone_number_verified, and the Standard attributes of your user pool. When your user pool has read access to these default attributes, ReadAttributes doesn't return any information. Amazon Cognito only populates ReadAttributes in the API response if you have specified your own custom set of read attributes. public let readAttributes: [String]? 
@@ -7634,6 +7713,7 @@ public struct CognitoIdentityProviderErrorType: AWSErrorType { case limitExceededException = "LimitExceededException" case mfaMethodNotFoundException = "MFAMethodNotFoundException" case notAuthorizedException = "NotAuthorizedException" + case passwordHistoryPolicyViolationException = "PasswordHistoryPolicyViolationException" case passwordResetRequiredException = "PasswordResetRequiredException" case preconditionNotMetException = "PreconditionNotMetException" case resourceNotFoundException = "ResourceNotFoundException" @@ -7716,6 +7796,8 @@ public struct CognitoIdentityProviderErrorType: AWSErrorType { public static var mfaMethodNotFoundException: Self { .init(.mfaMethodNotFoundException) } /// This exception is thrown when a user isn't authorized. public static var notAuthorizedException: Self { .init(.notAuthorizedException) } + /// The message returned when a user's new password matches a previous password and doesn't comply with the password-history policy. + public static var passwordHistoryPolicyViolationException: Self { .init(.passwordHistoryPolicyViolationException) } /// This exception is thrown when a password reset is required. public static var passwordResetRequiredException: Self { .init(.passwordResetRequiredException) } /// This exception is thrown when a precondition is not met. diff --git a/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift b/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift index 8aa8cc84df..6e5294a730 100644 --- a/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift +++ b/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift @@ -1557,7 +1557,7 @@ extension ComputeOptimizer { public struct ECSServiceRecommendationFilter: AWSEncodableShape { /// The name of the filter. Specify Finding to return recommendations with a specific finding classification. Specify FindingReasonCode to return recommendations with a specific finding reason code. 
You can filter your Amazon ECS service recommendations by tag:key and tag-key tags. A tag:key is a key and value combination of a tag assigned to your Amazon ECS service recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all Amazon ECS service recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value. A tag-key is the key of a tag assigned to your Amazon ECS service recommendations. Use this filter to find all of your Amazon ECS service recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your Amazon ECS service recommendations with a tag key value of Owner or without any tag keys assigned. public let name: ECSServiceRecommendationFilterName? - /// The value of the filter. The valid values for this parameter are as follows: If you specify the name parameter as Finding, specify Optimized, NotOptimized, or Unavailable. If you specify the name parameter as FindingReasonCode, specify CPUUnderprovisioned, CPUOverprovisioned, MemoryUnderprovisioned, or MemoryOverprovisioned. + /// The value of the filter. The valid values for this parameter are as follows: If you specify the name parameter as Finding, specify Optimized, Underprovisioned, or Overprovisioned. If you specify the name parameter as FindingReasonCode, specify CPUUnderprovisioned, CPUOverprovisioned, MemoryUnderprovisioned, or MemoryOverprovisioned. public let values: [String]? public init(name: ECSServiceRecommendationFilterName? = nil, values: [String]? = nil) { @@ -3578,7 +3578,7 @@ extension ComputeOptimizer { public let externalMetricsPreference: ExternalMetricsPreference? /// The status of the inferred workload types recommendation preference to create or update. The inferred workload type feature is active by default. To deactivate it, create a recommendation preference. 
Specify the Inactive status to deactivate the feature, or specify Active to activate it. For more information, see Inferred workload types in the Compute Optimizer User Guide. public let inferredWorkloadTypes: InferredWorkloadTypesPreference? - /// The preference to control the number of days the utilization metrics of the Amazon Web Services resource are analyzed. When this preference isn't specified, we use the default value DAYS_14. You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types. + /// The preference to control the number of days the utilization metrics of the Amazon Web Services resource are analyzed. When this preference isn't specified, we use the default value DAYS_14. You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types. Amazon EC2 instance lookback preferences can be set at the organization, account, and resource levels. Auto Scaling group lookback preferences can only be set at the resource level. public let lookBackPeriod: LookBackPeriodPreference? /// The preference to control which resource type values are considered when generating rightsizing recommendations. You can specify this preference as a combination of include and exclude lists. You must specify either an includeList or excludeList. If the preference is an empty set of resource type values, an error occurs. You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types. public let preferredResources: [PreferredResource]? 
diff --git a/Sources/Soto/Services/ConfigService/ConfigService_shapes.swift b/Sources/Soto/Services/ConfigService/ConfigService_shapes.swift index 8a08ebd0ea..48edf11c14 100644 --- a/Sources/Soto/Services/ConfigService/ConfigService_shapes.swift +++ b/Sources/Soto/Services/ConfigService/ConfigService_shapes.swift @@ -2391,7 +2391,7 @@ extension ConfigService { public func validate(name: String) throws { try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, max: 64) try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, min: 1) - try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, pattern: "\\S") + try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, pattern: "^[A-Za-z0-9-_]+$") } private enum CodingKeys: String, CodingKey { @@ -4969,7 +4969,7 @@ extension ConfigService { try self.validate(self.limit, name: "limit", parent: name, min: 0) try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, max: 64) try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, min: 1) - try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, pattern: "\\S") + try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, pattern: "^[A-Za-z0-9-_]+$") } private enum CodingKeys: String, CodingKey { @@ -5060,7 +5060,7 @@ extension ConfigService { public func validate(name: String) throws { try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, max: 64) try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, min: 1) - try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, pattern: "\\S") + try 
self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, pattern: "^[A-Za-z0-9-_]+$") } private enum CodingKeys: String, CodingKey { @@ -6480,7 +6480,7 @@ extension ConfigService { try self.validate(self.excludedAccounts, name: "excludedAccounts", parent: name, max: 1000) try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, max: 64) try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, min: 1) - try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, pattern: "\\S") + try self.validate(self.organizationConfigRuleName, name: "organizationConfigRuleName", parent: name, pattern: "^[A-Za-z0-9-_]+$") try self.organizationCustomPolicyRuleMetadata?.validate(name: "\(name).organizationCustomPolicyRuleMetadata") try self.organizationCustomRuleMetadata?.validate(name: "\(name).organizationCustomRuleMetadata") try self.organizationManagedRuleMetadata?.validate(name: "\(name).organizationManagedRuleMetadata") diff --git a/Sources/Soto/Services/Connect/Connect_api.swift b/Sources/Soto/Services/Connect/Connect_api.swift index 1841850e8f..866afcb094 100644 --- a/Sources/Soto/Services/Connect/Connect_api.swift +++ b/Sources/Soto/Services/Connect/Connect_api.swift @@ -433,7 +433,7 @@ public struct Connect: AWSService { ) } - /// This API is in preview release for Amazon Connect and is subject to change. Initiates an Amazon Connect instance with all the supported channels enabled. It does not attach any storage, such as Amazon Simple Storage Service (Amazon S3) or Amazon Kinesis. It also does not allow for any configurations on features, such as Contact Lens for Amazon Connect. Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. + /// This API is in preview release for Amazon Connect and is subject to change. 
Initiates an Amazon Connect instance with all the supported channels enabled. It does not attach any storage, such as Amazon Simple Storage Service (Amazon S3) or Amazon Kinesis. It also does not allow for any configurations on features, such as Contact Lens for Amazon Connect. For more information, see Create an Amazon Connect instance in the Amazon Connect Administrator Guide. Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. /// If you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. /// You must wait 30 days before you can restart creating and deleting instances in your account. @Sendable @@ -487,7 +487,7 @@ public struct Connect: AWSService { ) } - /// Creates a new predefined attribute for the specified Amazon Connect instance. + /// Creates a new predefined attribute for the specified Amazon Connect instance. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents. @Sendable public func createPredefinedAttribute(_ input: CreatePredefinedAttributeRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -565,7 +565,7 @@ public struct Connect: AWSService { ) } - /// Creates a security profile. + /// Creates a security profile. For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions. 
@Sendable public func createSecurityProfile(_ input: CreateSecurityProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSecurityProfileResponse { return try await self.client.execute( @@ -775,7 +775,7 @@ public struct Connect: AWSService { ) } - /// This API is in preview release for Amazon Connect and is subject to change. Deletes the Amazon Connect instance. Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. + /// This API is in preview release for Amazon Connect and is subject to change. Deletes the Amazon Connect instance. For more information, see Delete your Amazon Connect instance in the Amazon Connect Administrator Guide. Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. /// If you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. /// You must wait 30 days before you can restart creating and deleting instances in your account. @Sendable @@ -829,7 +829,7 @@ public struct Connect: AWSService { ) } - /// Deletes a queue. + /// Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin website. @Sendable public func deleteQueue(_ input: DeleteQueueRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -1155,7 +1155,7 @@ public struct Connect: AWSService { ) } - /// Describes a predefined attribute for the specified Amazon Connect instance. + /// Describes a predefined attribute for the specified Amazon Connect instance. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents. 
@Sendable public func describePredefinedAttribute(_ input: DescribePredefinedAttributeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribePredefinedAttributeResponse { return try await self.client.execute( @@ -1233,7 +1233,7 @@ public struct Connect: AWSService { ) } - /// Gets basic information about the security profle. + /// Gets basic information about the security profile. For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions. @Sendable public func describeSecurityProfile(_ input: DescribeSecurityProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeSecurityProfileResponse { return try await self.client.execute( @@ -1949,7 +1949,7 @@ public struct Connect: AWSService { ) } - /// Lists predefined attributes for the specified Amazon Connect instance. + /// Lists predefined attributes for the specified Amazon Connect instance. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents. @Sendable public func listPredefinedAttributes(_ input: ListPredefinedAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListPredefinedAttributesResponse { return try await self.client.execute( @@ -2092,7 +2092,7 @@ public struct Connect: AWSService { ) } - /// Lists the permissions granted to a security profile. + /// Lists the permissions granted to a security profile. For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions. 
@Sendable public func listSecurityProfilePermissions(_ input: ListSecurityProfilePermissionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSecurityProfilePermissionsResponse { return try await self.client.execute( @@ -2105,7 +2105,7 @@ public struct Connect: AWSService { ) } - /// Provides summary information about the security profiles for the specified Amazon Connect instance. For more information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. + /// Provides summary information about the security profiles for the specified Amazon Connect instance. For more information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions. @Sendable public func listSecurityProfiles(_ input: ListSecurityProfilesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSecurityProfilesResponse { return try await self.client.execute( @@ -2417,7 +2417,7 @@ public struct Connect: AWSService { ) } - /// Predefined attributes that meet certain criteria. + /// Searches predefined attributes that meet certain criteria. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents. @Sendable public func searchPredefinedAttributes(_ input: SearchPredefinedAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchPredefinedAttributesResponse { return try await self.client.execute( @@ -2495,7 +2495,7 @@ public struct Connect: AWSService { ) } - /// Searches security profiles in an Amazon Connect instance, with optional filtering. + /// Searches security profiles in an Amazon Connect instance, with optional filtering. 
For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions. @Sendable public func searchSecurityProfiles(_ input: SearchSecurityProfilesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchSecurityProfilesResponse { return try await self.client.execute( @@ -3042,7 +3042,7 @@ public struct Connect: AWSService { ) } - /// Updates a predefined attribute for the specified Amazon Connect instance. + /// Updates a predefined attribute for the specified Amazon Connect instance. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents. @Sendable public func updatePredefinedAttribute(_ input: UpdatePredefinedAttributeRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -3237,7 +3237,7 @@ public struct Connect: AWSService { ) } - /// Updates a security profile. + /// Updates a security profile. For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions. @Sendable public func updateSecurityProfile(_ input: UpdateSecurityProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -3898,7 +3898,7 @@ extension Connect { ) } - /// Lists predefined attributes for the specified Amazon Connect instance. + /// Lists predefined attributes for the specified Amazon Connect instance. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. 
For more information, see Create predefined attributes for routing contacts to agents. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -4107,7 +4107,7 @@ extension Connect { ) } - /// Lists the permissions granted to a security profile. + /// Lists the permissions granted to a security profile. For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -4126,7 +4126,7 @@ extension Connect { ) } - /// Provides summary information about the security profiles for the specified Amazon Connect instance. For more information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. + /// Provides summary information about the security profiles for the specified Amazon Connect instance. For more information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -4430,7 +4430,7 @@ extension Connect { ) } - /// Predefined attributes that meet certain criteria. + /// Searches predefined attributes that meet certain criteria. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -4544,7 +4544,7 @@ extension Connect { ) } - /// Searches security profiles in an Amazon Connect instance, with optional filtering. + /// Searches security profiles in an Amazon Connect instance, with optional filtering. 
For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/Connect/Connect_shapes.swift b/Sources/Soto/Services/Connect/Connect_shapes.swift index 944712bf9a..70c69edf9e 100644 --- a/Sources/Soto/Services/Connect/Connect_shapes.swift +++ b/Sources/Soto/Services/Connect/Connect_shapes.swift @@ -771,11 +771,27 @@ extension Connect { public var description: String { return self.rawValue } } + public enum RealTimeContactAnalysisPostContactSummaryFailureCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failedSafetyGuidelines = "FAILED_SAFETY_GUIDELINES" + case insufficientConversationContent = "INSUFFICIENT_CONVERSATION_CONTENT" + case internalError = "INTERNAL_ERROR" + case invalidAnalysisConfiguration = "INVALID_ANALYSIS_CONFIGURATION" + case quotaExceeded = "QUOTA_EXCEEDED" + public var description: String { return self.rawValue } + } + + public enum RealTimeContactAnalysisPostContactSummaryStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case completed = "COMPLETED" + case failed = "FAILED" + public var description: String { return self.rawValue } + } + public enum RealTimeContactAnalysisSegmentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case attachments = "Attachments" case categories = "Categories" case event = "Event" case issues = "Issues" + case postContactSummary = "PostContactSummary" case transcript = "Transcript" public var description: String { return self.rawValue } } @@ -1264,6 +1280,8 @@ extension Connect { case categories(RealTimeContactAnalysisSegmentCategories) case event(RealTimeContactAnalysisSegmentEvent) case issues(RealTimeContactAnalysisSegmentIssues) + 
/// Information about the post-contact summary. + case postContactSummary(RealTimeContactAnalysisSegmentPostContactSummary) case transcript(RealTimeContactAnalysisSegmentTranscript) public init(from decoder: Decoder) throws { @@ -1288,6 +1306,9 @@ extension Connect { case .issues: let value = try container.decode(RealTimeContactAnalysisSegmentIssues.self, forKey: .issues) self = .issues(value) + case .postContactSummary: + let value = try container.decode(RealTimeContactAnalysisSegmentPostContactSummary.self, forKey: .postContactSummary) + self = .postContactSummary(value) case .transcript: let value = try container.decode(RealTimeContactAnalysisSegmentTranscript.self, forKey: .transcript) self = .transcript(value) @@ -1299,6 +1320,7 @@ extension Connect { case categories = "Categories" case event = "Event" case issues = "Issues" + case postContactSummary = "PostContactSummary" case transcript = "Transcript" } } @@ -1716,14 +1738,20 @@ extension Connect { } } - public struct AgentsCriteria: AWSDecodableShape { - /// An object to specify a list of agents, by Agent ID. + public struct AgentsCriteria: AWSEncodableShape & AWSDecodableShape { + /// An object to specify a list of agents, by user ID. public let agentIds: [String]? public init(agentIds: [String]? = nil) { self.agentIds = agentIds } + public func validate(name: String) throws { + try self.agentIds?.forEach { + try validate($0, name: "agentIds[]", parent: name, max: 256) + } + } + private enum CodingKeys: String, CodingKey { case agentIds = "AgentIds" } @@ -2494,7 +2522,7 @@ extension Connect { } } - public struct AttributeCondition: AWSDecodableShape { + public struct AttributeCondition: AWSEncodableShape & AWSDecodableShape { /// The operator of the condition. public let comparisonOperator: String? /// An object to define AgentsCriteria. 
@@ -2514,6 +2542,18 @@ extension Connect { self.value = value } + public func validate(name: String) throws { + try self.validate(self.comparisonOperator, name: "comparisonOperator", parent: name, max: 127) + try self.validate(self.comparisonOperator, name: "comparisonOperator", parent: name, min: 1) + try self.matchCriteria?.validate(name: "\(name).matchCriteria") + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.proficiencyLevel, name: "proficiencyLevel", parent: name, max: 5.0) + try self.validate(self.proficiencyLevel, name: "proficiencyLevel", parent: name, min: 1.0) + try self.validate(self.value, name: "value", parent: name, max: 128) + try self.validate(self.value, name: "value", parent: name, min: 1) + } + private enum CodingKeys: String, CodingKey { case comparisonOperator = "ComparisonOperator" case matchCriteria = "MatchCriteria" @@ -5018,7 +5058,7 @@ extension Connect { try validate($0.key, name: "allowedAccessControlTags.key", parent: name, min: 1) try validate($0.value, name: "allowedAccessControlTags[\"\($0.key)\"]", parent: name, max: 256) } - try self.validate(self.allowedAccessControlTags, name: "allowedAccessControlTags", parent: name, max: 2) + try self.validate(self.allowedAccessControlTags, name: "allowedAccessControlTags", parent: name, max: 4) try self.applications?.forEach { try $0.validate(name: "\(name).applications[]") } @@ -8969,7 +9009,7 @@ extension Connect { } } - public struct Expression: AWSDecodableShape { + public struct Expression: AWSEncodableShape & AWSDecodableShape { /// List of routing expressions which will be AND-ed together. public let andExpression: [Expression]? /// An object to specify the predefined attribute condition. 
@@ -8983,6 +9023,16 @@ extension Connect { self.orExpression = orExpression } + public func validate(name: String) throws { + try self.andExpression?.forEach { + try $0.validate(name: "\(name).andExpression[]") + } + try self.attributeCondition?.validate(name: "\(name).attributeCondition") + try self.orExpression?.forEach { + try $0.validate(name: "\(name).orExpression[]") + } + } + private enum CodingKeys: String, CodingKey { case andExpression = "AndExpression" case attributeCondition = "AttributeCondition" @@ -9610,15 +9660,15 @@ extension Connect { public struct GetMetricDataV2Request: AWSEncodableShape { /// The timestamp, in UNIX Epoch time format, at which to end the reporting interval for the retrieval of historical metrics data. The time must be later than the start time timestamp. It cannot be later than the current timestamp. public let endTime: Date - /// The filters to apply to returned metrics. You can filter on the following resources: Agents Channels Feature Queues Routing profiles Routing step expression User hierarchy groups At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups. To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator Guide. Note the following limits: Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID | FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED | Filter values: A maximum of 100 filter values are supported in a single request. 
VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters. contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics. connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key. ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is. Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the Q_CONNECT_ENABLED filter key. TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow. FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow This filter is available only for contact record-driven metrics. + /// The filters to apply to returned metrics. You can filter on the following resources: Agents Campaigns Channels Feature Queues Routing profiles Routing step expression User hierarchy groups At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups. For metrics for outbound campaigns analytics, you can also use campaigns to satisfy at least one filter requirement. To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator Guide. Note the following limits: Filter keys: A maximum of 5 filter keys are supported in a single request. 
Valid filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID | FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED | Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters. contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics. connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key. ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is. Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the Q_CONNECT_ENABLED filter key. TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow. FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow This filter is available only for contact record-driven metrics. Campaign ARNs are valid filterValues for the CAMPAIGN filter key. 
public let filters: [FilterV2] - /// The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues. If no grouping is specified, a summary of all metrics is returned. Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | FLOWS_RESOURCE_ID | FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | INITIATION_METHOD | Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION + /// The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues. If no grouping is specified, a summary of all metrics is returned. Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | FLOWS_RESOURCE_ID | FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | INITIATION_METHOD | Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION public let groupings: [String]? /// The interval period and timezone to apply to returned metrics. IntervalPeriod: An aggregated grouping applied to request metrics. Valid IntervalPeriod values are: FIFTEEN_MIN | THIRTY_MIN | HOUR | DAY | WEEK | TOTAL. 
For example, if IntervalPeriod is selected THIRTY_MIN, StartTime and EndTime differs by 1 day, then Amazon Connect returns 48 results in the response. Each result is aggregated by the THIRTY_MIN period. By default Amazon Connect aggregates results based on the TOTAL interval period. The following list describes restrictions on StartTime and EndTime based on which IntervalPeriod is requested. FIFTEEN_MIN: The difference between StartTime and EndTime must be less than 3 days. THIRTY_MIN: The difference between StartTime and EndTime must be less than 3 days. HOUR: The difference between StartTime and EndTime must be less than 3 days. DAY: The difference between StartTime and EndTime must be less than 35 days. WEEK: The difference between StartTime and EndTime must be less than 35 days. TOTAL: The difference between StartTime and EndTime must be less than 35 days. TimeZone: The timezone applied to requested metrics. public let interval: IntervalDetails? /// The maximum number of results to return per page. public let maxResults: Int? - /// The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. 
AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold 
CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. 
CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time 
PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
SUM_CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect 
UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts + /// The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in 
seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. 
Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. 
Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. 
PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). 
UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. SUM_CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). 
UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts public let metrics: [MetricV2] /// The 
token for the next set of results. Use the value returned in the previous /// response in the next request to retrieve the next set of results. @@ -12358,7 +12408,7 @@ extension Connect { try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 100000) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.segmentTypes, name: "segmentTypes", parent: name, max: 5) + try self.validate(self.segmentTypes, name: "segmentTypes", parent: name, max: 6) } private enum CodingKeys: String, CodingKey { @@ -13316,14 +13366,18 @@ extension Connect { } } - public struct MatchCriteria: AWSDecodableShape { - /// An object to define AgentIds. + public struct MatchCriteria: AWSEncodableShape & AWSDecodableShape { + /// An object to define agentIds. public let agentsCriteria: AgentsCriteria? public init(agentsCriteria: AgentsCriteria? = nil) { self.agentsCriteria = agentsCriteria } + public func validate(name: String) throws { + try self.agentsCriteria?.validate(name: "\(name).agentsCriteria") + } + private enum CodingKeys: String, CodingKey { case agentsCriteria = "AgentsCriteria" } @@ -14669,6 +14723,27 @@ extension Connect { } } + public struct RealTimeContactAnalysisSegmentPostContactSummary: AWSDecodableShape { + /// The content of the summary. + public let content: String? + /// If the summary failed to be generated, one of the following failure codes occurs: QUOTA_EXCEEDED: The number of concurrent analytics jobs reached your service quota. INSUFFICIENT_CONVERSATION_CONTENT: The conversation needs to have at least one turn from both the participants in order to generate the summary. FAILED_SAFETY_GUIDELINES: The generated summary cannot be provided because it failed to meet system safety guidelines. 
INVALID_ANALYSIS_CONFIGURATION: This code occurs when, for example, you're using a language that isn't supported by generative AI-powered post-contact summaries. INTERNAL_ERROR: Internal system error. + public let failureCode: RealTimeContactAnalysisPostContactSummaryFailureCode? + /// Whether the summary was successfully COMPLETED or FAILED to be generated. + public let status: RealTimeContactAnalysisPostContactSummaryStatus + + public init(content: String? = nil, failureCode: RealTimeContactAnalysisPostContactSummaryFailureCode? = nil, status: RealTimeContactAnalysisPostContactSummaryStatus) { + self.content = content + self.failureCode = failureCode + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case content = "Content" + case failureCode = "FailureCode" + case status = "Status" + } + } + public struct RealTimeContactAnalysisSegmentTranscript: AWSDecodableShape { /// The content of the transcript. Can be redacted. public let content: String @@ -14995,6 +15070,59 @@ extension Connect { } } + public struct RoutingCriteriaInput: AWSEncodableShape { + /// When Amazon Connect does not find an available agent meeting the requirements in a step for
 a given step duration, the routing criteria will move on to the next step sequentially until a
 join is completed with an agent. When all steps are exhausted, the contact will be offered to any agent in the queue. + public let steps: [RoutingCriteriaInputStep]? + + public init(steps: [RoutingCriteriaInputStep]? = nil) { + self.steps = steps + } + + public func validate(name: String) throws { + try self.steps?.forEach { + try $0.validate(name: "\(name).steps[]") + } + } + + private enum CodingKeys: String, CodingKey { + case steps = "Steps" + } + } + + public struct RoutingCriteriaInputStep: AWSEncodableShape { + /// An object to specify the expiration of a routing step. + public let expiry: RoutingCriteriaInputStepExpiry? + /// A tagged union to specify expression for a routing step. + public let expression: Expression? + + public init(expiry: RoutingCriteriaInputStepExpiry? = nil, expression: Expression? = nil) { + self.expiry = expiry + self.expression = expression + } + + public func validate(name: String) throws { + try self.expression?.validate(name: "\(name).expression") + } + + private enum CodingKeys: String, CodingKey { + case expiry = "Expiry" + case expression = "Expression" + } + } + + public struct RoutingCriteriaInputStepExpiry: AWSEncodableShape { + /// The number of seconds that the contact will be routed only to agents matching this routing
 step, if expiry was configured for this routing step. + public let durationInSeconds: Int? + + public init(durationInSeconds: Int? = nil) { + self.durationInSeconds = durationInSeconds + } + + private enum CodingKeys: String, CodingKey { + case durationInSeconds = "DurationInSeconds" + } + } + public struct RoutingProfile: AWSDecodableShape { /// Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time. public let agentAvailabilityTimer: AgentAvailabilityTimer? @@ -18211,7 +18339,7 @@ extension Connect { } public struct ThresholdV2: AWSEncodableShape & AWSDecodableShape { - /// The type of comparison. Only "less than" (LT) comparisons are supported. + /// The type of comparison. Only "less than" (LT) and "greater than" (GT) comparisons are supported. public let comparison: String? /// The threshold value to compare. public let thresholdValue: Double? @@ -19048,12 +19176,15 @@ extension Connect { public let queuePriority: Int64? /// The number of seconds to add or subtract from the contact's routing age. Contacts are routed to agents on a first-come, first-serve basis. This means that changing their amount of time in queue compared to others also changes their position in queue. public let queueTimeAdjustmentSeconds: Int? + /// Updates the routing criteria on the contact. These properties can be used to change how a
 contact is routed within the queue. + public let routingCriteria: RoutingCriteriaInput? - public init(contactId: String, instanceId: String, queuePriority: Int64? = nil, queueTimeAdjustmentSeconds: Int? = nil) { + public init(contactId: String, instanceId: String, queuePriority: Int64? = nil, queueTimeAdjustmentSeconds: Int? = nil, routingCriteria: RoutingCriteriaInput? = nil) { self.contactId = contactId self.instanceId = instanceId self.queuePriority = queuePriority self.queueTimeAdjustmentSeconds = queueTimeAdjustmentSeconds + self.routingCriteria = routingCriteria } public func encode(to encoder: Encoder) throws { @@ -19063,6 +19194,7 @@ extension Connect { request.encodePath(self.instanceId, key: "InstanceId") try container.encodeIfPresent(self.queuePriority, forKey: .queuePriority) try container.encodeIfPresent(self.queueTimeAdjustmentSeconds, forKey: .queueTimeAdjustmentSeconds) + try container.encodeIfPresent(self.routingCriteria, forKey: .routingCriteria) } public func validate(name: String) throws { @@ -19072,11 +19204,13 @@ extension Connect { try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) try self.validate(self.queuePriority, name: "queuePriority", parent: name, max: 9223372036854775807) try self.validate(self.queuePriority, name: "queuePriority", parent: name, min: 1) + try self.routingCriteria?.validate(name: "\(name).routingCriteria") } private enum CodingKeys: String, CodingKey { case queuePriority = "QueuePriority" case queueTimeAdjustmentSeconds = "QueueTimeAdjustmentSeconds" + case routingCriteria = "RoutingCriteria" } } @@ -20098,7 +20232,7 @@ extension Connect { try validate($0.key, name: "allowedAccessControlTags.key", parent: name, min: 1) try validate($0.value, name: "allowedAccessControlTags[\"\($0.key)\"]", parent: name, max: 256) } - try self.validate(self.allowedAccessControlTags, name: "allowedAccessControlTags", parent: name, max: 2) + try self.validate(self.allowedAccessControlTags, name: 
"allowedAccessControlTags", parent: name, max: 4) try self.applications?.forEach { try $0.validate(name: "\(name).applications[]") } diff --git a/Sources/Soto/Services/ConnectContactLens/ConnectContactLens_api.swift b/Sources/Soto/Services/ConnectContactLens/ConnectContactLens_api.swift index 9fcbda169b..dcc2e2f4ae 100644 --- a/Sources/Soto/Services/ConnectContactLens/ConnectContactLens_api.swift +++ b/Sources/Soto/Services/ConnectContactLens/ConnectContactLens_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS ConnectContactLens service. /// -/// Contact Lens for Amazon Connect enables you to analyze conversations between customer and agents, by using speech transcription, natural language processing, and intelligent search capabilities. It performs sentiment analysis, detects issues, and enables you to automatically categorize contacts. Contact Lens for Amazon Connect provides both real-time and post-call analytics of customer-agent conversations. For more information, see Analyze conversations using Contact Lens in the Amazon Connect Administrator Guide. +/// Contact Lens actions Contact Lens data types Amazon Connect Contact Lens enables you to analyze conversations between customer and agents, by using speech transcription, natural language processing, and intelligent search capabilities. It performs sentiment analysis, detects issues, and enables you to automatically categorize contacts. Amazon Connect Contact Lens provides both real-time and post-call analytics of customer-agent conversations. For more information, see Analyze conversations using speech analytics in the Amazon Connect Administrator Guide. 
public struct ConnectContactLens: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/ConnectContactLens/ConnectContactLens_shapes.swift b/Sources/Soto/Services/ConnectContactLens/ConnectContactLens_shapes.swift index ded50a1e57..2073e5d6c9 100644 --- a/Sources/Soto/Services/ConnectContactLens/ConnectContactLens_shapes.swift +++ b/Sources/Soto/Services/ConnectContactLens/ConnectContactLens_shapes.swift @@ -26,6 +26,21 @@ import Foundation extension ConnectContactLens { // MARK: Enums + public enum PostContactSummaryFailureCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failedSafetyGuidelines = "FAILED_SAFETY_GUIDELINES" + case insufficientConversationContent = "INSUFFICIENT_CONVERSATION_CONTENT" + case internalError = "INTERNAL_ERROR" + case invalidAnalysisConfiguration = "INVALID_ANALYSIS_CONFIGURATION" + case quotaExceeded = "QUOTA_EXCEEDED" + public var description: String { return self.rawValue } + } + + public enum PostContactSummaryStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case completed = "COMPLETED" + case failed = "FAILED" + public var description: String { return self.rawValue } + } + public enum SentimentValue: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case negative = "NEGATIVE" case neutral = "NEUTRAL" @@ -100,7 +115,7 @@ extension ConnectContactLens { public let contactId: String? /// The identifier of the Amazon Connect instance. public let instanceId: String? - /// The maximimum number of results to return per page. + /// The maximum number of results to return per page. public let maxResults: Int? /// The token for the next set of results. Use the value returned in the previous /// response in the next request to retrieve the next set of results. @@ -169,19 +184,44 @@ extension ConnectContactLens { } } + public struct PostContactSummary: AWSDecodableShape { + /// The content of the summary. 
+ public let content: String? + /// If the summary failed to be generated, one of the following failure codes occurs: QUOTA_EXCEEDED: The number of concurrent analytics jobs reached your service quota. INSUFFICIENT_CONVERSATION_CONTENT: The conversation needs to have at least one turn from both the participants in order to generate the summary. FAILED_SAFETY_GUIDELINES: The generated summary cannot be provided because it failed to meet system safety guidelines. INVALID_ANALYSIS_CONFIGURATION: This code occurs when, for example, you're using a language that isn't supported by generative AI-powered post-contact summaries. INTERNAL_ERROR: Internal system error. + public let failureCode: PostContactSummaryFailureCode? + /// Whether the summary was successfully COMPLETED or FAILED to be generated. + public let status: PostContactSummaryStatus? + + public init(content: String? = nil, failureCode: PostContactSummaryFailureCode? = nil, status: PostContactSummaryStatus? = nil) { + self.content = content + self.failureCode = failureCode + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case content = "Content" + case failureCode = "FailureCode" + case status = "Status" + } + } + public struct RealtimeContactAnalysisSegment: AWSDecodableShape { /// The matched category rules. public let categories: Categories? + /// Information about the post-contact summary. + public let postContactSummary: PostContactSummary? /// The analyzed transcript. public let transcript: Transcript? - public init(categories: Categories? = nil, transcript: Transcript? = nil) { + public init(categories: Categories? = nil, postContactSummary: PostContactSummary? = nil, transcript: Transcript? 
= nil) { self.categories = categories + self.postContactSummary = postContactSummary self.transcript = transcript } private enum CodingKeys: String, CodingKey { case categories = "Categories" + case postContactSummary = "PostContactSummary" case transcript = "Transcript" } } @@ -197,11 +237,11 @@ extension ConnectContactLens { public let id: String? /// List of positions where issues were detected on the transcript. public let issuesDetected: [IssueDetected]? - /// The identifier of the participant. + /// The identifier of the participant. Valid values are CUSTOMER or AGENT. public let participantId: String? /// The role of participant. For example, is it a customer, agent, or system. public let participantRole: String? - /// The sentiment of the detected for this piece of transcript. + /// The sentiment detected for this piece of transcript. public let sentiment: SentimentValue? public init(beginOffsetMillis: Int? = nil, content: String? = nil, endOffsetMillis: Int? = nil, id: String? = nil, issuesDetected: [IssueDetected]? = nil, participantId: String? = nil, participantRole: String? = nil, sentiment: SentimentValue? = nil) { diff --git a/Sources/Soto/Services/ControlCatalog/ControlCatalog_api.swift b/Sources/Soto/Services/ControlCatalog/ControlCatalog_api.swift index f1f1a7c015..a02061a71c 100644 --- a/Sources/Soto/Services/ControlCatalog/ControlCatalog_api.swift +++ b/Sources/Soto/Services/ControlCatalog/ControlCatalog_api.swift @@ -73,6 +73,19 @@ public struct ControlCatalog: AWSService { // MARK: API Calls + /// Returns details about a specific control, most notably a list of Amazon Web Services Regions where this control is supported. Input a value for the ControlArn parameter, in ARN form. GetControl accepts controltower or controlcatalog control ARNs as input. Returns a controlcatalog ARN format. In the API response, controls that have the value GLOBAL in the Scope field do not show the DeployableRegions field, because it does not apply. 
Controls that have the value REGIONAL in the Scope field return a value for the DeployableRegions field, as shown in the example. + @Sendable + public func getControl(_ input: GetControlRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetControlResponse { + return try await self.client.execute( + operation: "GetControl", + path: "/get-control", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a paginated list of common controls from the Amazon Web Services Control Catalog. You can apply an optional filter to see common controls that have a specific objective. If you don’t provide a filter, the operation returns all common controls. @Sendable public func listCommonControls(_ input: ListCommonControlsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCommonControlsResponse { @@ -86,6 +99,19 @@ public struct ControlCatalog: AWSService { ) } + /// Returns a paginated list of all available controls in the Amazon Web Services Control Catalog library. Allows you to discover available controls. The list of controls is given as structures of type controlSummary. The ARN is returned in the global controlcatalog format, as shown in the examples. + @Sendable + public func listControls(_ input: ListControlsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListControlsResponse { + return try await self.client.execute( + operation: "ListControls", + path: "/list-controls", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a paginated list of domains from the Amazon Web Services Control Catalog. @Sendable public func listDomains(_ input: ListDomainsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDomainsResponse { @@ -145,6 +171,25 @@ extension ControlCatalog { ) } + /// Returns a paginated list of all available controls in the Amazon Web Services Control Catalog library. 
Allows you to discover available controls. The list of controls is given as structures of type controlSummary. The ARN is returned in the global controlcatalog format, as shown in the examples. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listControlsPaginator( + _ input: ListControlsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listControls, + inputKey: \ListControlsRequest.nextToken, + outputKey: \ListControlsResponse.nextToken, + logger: logger + ) + } + /// Returns a paginated list of domains from the Amazon Web Services Control Catalog. /// Return PaginatorSequence for operation. /// @@ -194,6 +239,15 @@ extension ControlCatalog.ListCommonControlsRequest: AWSPaginateToken { } } +extension ControlCatalog.ListControlsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> ControlCatalog.ListControlsRequest { + return .init( + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension ControlCatalog.ListDomainsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> ControlCatalog.ListDomainsRequest { return .init( diff --git a/Sources/Soto/Services/ControlCatalog/ControlCatalog_shapes.swift b/Sources/Soto/Services/ControlCatalog/ControlCatalog_shapes.swift index c1e21166e6..1ee85282d5 100644 --- a/Sources/Soto/Services/ControlCatalog/ControlCatalog_shapes.swift +++ b/Sources/Soto/Services/ControlCatalog/ControlCatalog_shapes.swift @@ -26,6 +26,19 @@ import Foundation extension ControlCatalog { // MARK: Enums + public enum ControlBehavior: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case detective = "DETECTIVE" + case preventive = "PREVENTIVE" + case proactive = "PROACTIVE" + public var description: String { return self.rawValue } + } + + public enum 
ControlScope: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case global = "GLOBAL" + case regional = "REGIONAL" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct AssociatedDomainSummary: AWSDecodableShape { @@ -118,6 +131,27 @@ extension ControlCatalog { } } + public struct ControlSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the control. + public let arn: String + /// A description of the control, as it may appear in the console. Describes the functionality of the control. + public let description: String + /// The display name of the control. + public let name: String + + public init(arn: String, description: String, name: String) { + self.arn = arn + self.description = description + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case description = "Description" + case name = "Name" + } + } + public struct DomainResourceFilter: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the domain. public let arn: String? @@ -166,6 +200,53 @@ extension ControlCatalog { } } + public struct GetControlRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the control. 
It has one of the following formats: Global format arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID} Or Regional format arn:{PARTITION}:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID} Here is a more general pattern that covers Amazon Web Services Control Tower and Control Catalog ARNs: ^arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_\\-]+$ + public let controlArn: String + + public init(controlArn: String) { + self.controlArn = controlArn + } + + public func validate(name: String) throws { + try self.validate(self.controlArn, name: "controlArn", parent: name, max: 2048) + try self.validate(self.controlArn, name: "controlArn", parent: name, min: 34) + try self.validate(self.controlArn, name: "controlArn", parent: name, pattern: "^arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_\\-]+$") + } + + private enum CodingKeys: String, CodingKey { + case controlArn = "ControlArn" + } + } + + public struct GetControlResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the control. + public let arn: String + /// A term that identifies the control's functional behavior. One of Preventive, Detective, Proactive + public let behavior: ControlBehavior + /// A description of what the control does. + public let name: String + /// The display name of the control. 
+ public let name: String + public let regionConfiguration: RegionConfiguration + + public init(arn: String, behavior: ControlBehavior, description: String, name: String, regionConfiguration: RegionConfiguration) { + self.arn = arn + self.behavior = behavior + self.description = description + self.name = name + self.regionConfiguration = regionConfiguration + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case behavior = "Behavior" + case description = "Description" + case name = "Name" + case regionConfiguration = "RegionConfiguration" + } + } + public struct ListCommonControlsRequest: AWSEncodableShape { /// An optional filter that narrows the results to a specific objective. This filter allows you to specify one objective ARN at a time. Passing multiple ARNs in the CommonControlFilter isn’t currently supported. public let commonControlFilter: CommonControlFilter? @@ -217,6 +298,50 @@ extension ControlCatalog { } } + public struct ListControlsRequest: AWSEncodableShape { + /// The maximum number of results on a page or for an API request call. + public let maxResults: Int? + /// The pagination token that's used to fetch the next set of results. + public let nextToken: String? + + public init(maxResults: Int? = nil, nextToken: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListControlsResponse: AWSDecodableShape { + /// Returns a list of controls, given as structures of type controlSummary. + public let controls: [ControlSummary] + /// The pagination token that's used to fetch the next set of results. + public let nextToken: String? + + public init(controls: [ControlSummary], nextToken: String? = nil) { + self.controls = controls + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case controls = "Controls" + case nextToken = "NextToken" + } + } + public struct ListDomainsRequest: AWSEncodableShape { /// The maximum number of results on a page or for an API request call. public let maxResults: Int? @@ -382,6 +507,23 @@ extension ControlCatalog { case name = "Name" } } + + public struct RegionConfiguration: AWSDecodableShape { + /// Regions in which the control is available to be deployed. + public let deployableRegions: [String]? + /// The coverage of the control, if deployed. Scope is an enumerated type, with value Regional, or Global. A control with Global scope is effective in all Amazon Web Services Regions, regardless of the Region from which it is enabled, or to which it is deployed. A control implemented by an SCP is usually Global in scope. A control with Regional scope has operations that are restricted specifically to the Region from which it is enabled and to which it is deployed. 
Controls implemented by Config rules and CloudFormation hooks usually are Regional in scope. Security Hub controls usually are Regional in scope. + public let scope: ControlScope + + public init(deployableRegions: [String]? = nil, scope: ControlScope) { + self.deployableRegions = deployableRegions + self.scope = scope + } + + private enum CodingKeys: String, CodingKey { + case deployableRegions = "DeployableRegions" + case scope = "Scope" + } + } } // MARK: - Errors @@ -391,6 +533,7 @@ public struct ControlCatalogErrorType: AWSErrorType { enum Code: String { case accessDeniedException = "AccessDeniedException" case internalServerException = "InternalServerException" + case resourceNotFoundException = "ResourceNotFoundException" case throttlingException = "ThrottlingException" case validationException = "ValidationException" } @@ -417,6 +560,8 @@ public struct ControlCatalogErrorType: AWSErrorType { public static var accessDeniedException: Self { .init(.accessDeniedException) } /// An internal service error occurred during the processing of your request. Try again later. public static var internalServerException: Self { .init(.internalServerException) } + /// The requested resource does not exist. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } /// The request was denied due to request throttling. public static var throttlingException: Self { .init(.throttlingException) } /// The request has invalid or missing parameters. diff --git a/Sources/Soto/Services/ControlTower/ControlTower_api.swift b/Sources/Soto/Services/ControlTower/ControlTower_api.swift index 838dc8692e..c5d8cbbc63 100644 --- a/Sources/Soto/Services/ControlTower/ControlTower_api.swift +++ b/Sources/Soto/Services/ControlTower/ControlTower_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS ControlTower service. 
/// -/// Amazon Web Services Control Tower offers application programming interface (API) operations that support programmatic interaction with these types of resources: Controls DisableControl EnableControl GetEnabledControl ListControlOperations ListEnabledControls UpdateEnabledControl Landing zones CreateLandingZone DeleteLandingZone GetLandingZone GetLandingZoneOperation ListLandingZones ResetLandingZone UpdateLandingZone Baselines DisableBaseline EnableBaseline GetBaseline GetBaselineOperation GetEnabledBaseline ListBaselines ListEnabledBaselines ResetEnabledBaseline UpdateEnabledBaseline Tagging ListTagsForResource TagResource UntagResource For more information about these types of resources, see the Amazon Web Services Control Tower User Guide . About control APIs These interfaces allow you to apply the Amazon Web Services library of pre-defined controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms "control" and "guardrail" are synonyms. To call these APIs, you'll need to know: the controlIdentifier for the control--or guardrail--you are targeting. the ARN associated with the target organizational unit (OU), which we call the targetIdentifier. the ARN associated with a resource that you wish to tag or untag. To get the controlIdentifier for your Amazon Web Services Control Tower control: The controlIdentifier is an ARN that is specified for each control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation. The controlIdentifier is unique in each Amazon Web Services Region for each control. You can find the controlIdentifier for each Region and control in the Tables of control metadata or the Control availability by Region tables in the Amazon Web Services Control Tower Controls Reference Guide. 
A quick-reference list of control identifers for the Amazon Web Services Control Tower legacy Strongly recommended and Elective controls is given in Resource identifiers for APIs and controls in the Amazon Web Services Control Tower Controls Reference Guide . Remember that Mandatory controls cannot be added or removed. ARN format: arn:aws:controltower:{REGION}::control/{CONTROL_NAME} Example: arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED To get the targetIdentifier: The targetIdentifier is the ARN for an OU. In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU. OU ARN format: arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId} About landing zone APIs You can configure and launch an Amazon Web Services Control Tower landing zone with APIs. For an introduction and steps, see Getting started with Amazon Web Services Control Tower using APIs. For an overview of landing zone API operations, see Amazon Web Services Control Tower supports landing zone APIs. The individual API operations for landing zones are detailed in this document, the API reference manual, in the "Actions" section. About baseline APIs You can apply the AWSControlTowerBaseline baseline to an organizational unit (OU) as a way to register the OU with Amazon Web Services Control Tower, programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration with baselines. You can call the baseline API operations to view the baselines that Amazon Web Services Control Tower enables for your landing zone, on your behalf, when setting up the landing zone. These baselines are read-only baselines. The individual API operations for baselines are detailed in this document, the API reference manual, in the "Actions" section. 
For usage examples, see Baseline API input and output examples with CLI. Details and examples Control API input and output examples with CLI Baseline API input and output examples with CLI Enable controls with CloudFormation Launch a landing zone with CloudFormation Control metadata tables (large page) Control availability by Region tables (large page) List of identifiers for legacy controls Controls reference guide Controls library groupings Creating Amazon Web Services Control Tower resources with Amazon Web Services CloudFormation To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower Recording API Requests Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made the request and when, and so on. For more about Amazon Web Services Control Tower and its support for CloudTrail, see Logging Amazon Web Services Control Tower Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User Guide. 
+/// Amazon Web Services Control Tower offers application programming interface (API) operations that support programmatic interaction with these types of resources: Controls DisableControl EnableControl GetEnabledControl ListControlOperations ListEnabledControls UpdateEnabledControl Landing zones CreateLandingZone DeleteLandingZone GetLandingZone GetLandingZoneOperation ListLandingZones ListLandingZoneOperations ResetLandingZone UpdateLandingZone Baselines DisableBaseline EnableBaseline GetBaseline GetBaselineOperation GetEnabledBaseline ListBaselines ListEnabledBaselines ResetEnabledBaseline UpdateEnabledBaseline Tagging ListTagsForResource TagResource UntagResource For more information about these types of resources, see the Amazon Web Services Control Tower User Guide . About control APIs These interfaces allow you to apply the Amazon Web Services library of pre-defined controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms "control" and "guardrail" are synonyms. To call these APIs, you'll need to know: the controlIdentifier for the control--or guardrail--you are targeting. the ARN associated with the target organizational unit (OU), which we call the targetIdentifier. the ARN associated with a resource that you wish to tag or untag. To get the controlIdentifier for your Amazon Web Services Control Tower control: The controlIdentifier is an ARN that is specified for each control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation. About identifiers for Amazon Web Services Control Tower The Amazon Web Services Control Tower controlIdentifier is unique in each Amazon Web Services Region for each control. You can find the controlIdentifier for each Region and control in the Tables of control metadata or the Control availability by Region tables in the Amazon Web Services Control Tower Controls Reference Guide. 
A quick-reference list of control identifiers for the Amazon Web Services Control Tower legacy Strongly recommended and Elective controls is given in Resource identifiers for APIs and controls in the Amazon Web Services Control Tower Controls Reference Guide . Remember that Mandatory controls cannot be added or removed. Some controls have two identifiers ARN format for Amazon Web Services Control Tower: arn:aws:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID} Example: arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED ARN format for Amazon Web Services Control Catalog: arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID} You can find the {CONTROL_CATALOG_OPAQUE_ID} in the Amazon Web Services Control Tower Controls Reference Guide , or in the Amazon Web Services Control Tower console, on the Control details page. The Amazon Web Services Control Tower APIs for enabled controls, such as GetEnabledControl and ListEnabledControls always return an ARN of the same type given when the control was enabled. To get the targetIdentifier: The targetIdentifier is the ARN for an OU. In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU. OU ARN format: arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId} About landing zone APIs You can configure and launch an Amazon Web Services Control Tower landing zone with APIs. For an introduction and steps, see Getting started with Amazon Web Services Control Tower using APIs. For an overview of landing zone API operations, see Amazon Web Services Control Tower supports landing zone APIs. The individual API operations for landing zones are detailed in this document, the API reference manual, in the "Actions" section. 
About baseline APIs You can apply the AWSControlTowerBaseline baseline to an organizational unit (OU) as a way to register the OU with Amazon Web Services Control Tower, programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration with baselines. You can call the baseline API operations to view the baselines that Amazon Web Services Control Tower enables for your landing zone, on your behalf, when setting up the landing zone. These baselines are read-only baselines. The individual API operations for baselines are detailed in this document, the API reference manual, in the "Actions" section. For usage examples, see Baseline API input and output examples with CLI. About Amazon Web Services Control Catalog identifiers The EnableControl and DisableControl API operations can be called by specifying either the Amazon Web Services Control Tower identifier or the Amazon Web Services Control Catalog identifier. The API response returns the same type of identifier that you specified when calling the API. If you use an Amazon Web Services Control Tower identifier to call the EnableControl API, and then call EnableControl again with an Amazon Web Services Control Catalog identifier, Amazon Web Services Control Tower returns an error message stating that the control is already enabled. Similar behavior applies to the DisableControl API operation. Mandatory controls and the landing-zone-level Region deny control have Amazon Web Services Control Tower identifiers only. 
Details and examples Control API input and output examples with CLI Baseline API input and output examples with CLI Enable controls with CloudFormation Launch a landing zone with CloudFormation Control metadata tables (large page) Control availability by Region tables (large page) List of identifiers for legacy controls Controls reference guide Controls library groupings Creating Amazon Web Services Control Tower resources with Amazon Web Services CloudFormation To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower Recording API Requests Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made the request and when, and so on. For more about Amazon Web Services Control Tower and its support for CloudTrail, see Logging Amazon Web Services Control Tower Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User Guide. public struct ControlTower: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_api.swift b/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_api.swift index 20c51dd8d9..16937ff3e1 100644 --- a/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_api.swift +++ b/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_api.swift @@ -144,7 +144,7 @@ public struct CostOptimizationHub: AWSService { ) } - /// Updates the enrollment (opt in and opt out) status of an account to the Cost Optimization Hub service. 
If the account is a management account of an organization, this action can also be used to enroll member accounts of the organization. You must have the appropriate permissions to opt in to Cost Optimization Hub and to view its recommendations. When you opt in, Cost Optimization Hub automatically creates a service-linked role in your account to access its data. + /// Updates the enrollment (opt in and opt out) status of an account to the Cost Optimization Hub service. If the account is a management account or delegated administrator of an organization, this action can also be used to enroll member accounts of the organization. You must have the appropriate permissions to opt in to Cost Optimization Hub and to view its recommendations. When you opt in, Cost Optimization Hub automatically creates a service-linked role in your account to access its data. @Sendable public func updateEnrollmentStatus(_ input: UpdateEnrollmentStatusRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEnrollmentStatusResponse { return try await self.client.execute( @@ -259,6 +259,7 @@ extension CostOptimizationHub.ListRecommendationSummariesRequest: AWSPaginateTok filter: self.filter, groupBy: self.groupBy, maxResults: self.maxResults, + metrics: self.metrics, nextToken: token ) } diff --git a/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_shapes.swift b/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_shapes.swift index 7dc48a0175..bf75656492 100644 --- a/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_shapes.swift +++ b/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_shapes.swift @@ -94,6 +94,11 @@ extension CostOptimizationHub { public var description: String { return self.rawValue } } + public enum SummaryMetrics: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case savingsPercentage = "SavingsPercentage" + public var description: String { return self.rawValue } + } + public 
enum ResourceDetails: AWSDecodableShape, Sendable { /// The Compute Savings Plans recommendation details. case computeSavingsPlans(ComputeSavingsPlans) @@ -981,7 +986,7 @@ extension CostOptimizationHub { } public struct ListEnrollmentStatusesResponse: AWSDecodableShape { - /// The enrollment status of all member accounts in the organization if the account is the management account. + /// The enrollment status of all member accounts in the organization if the account is the management account or delegated administrator. public let includeMemberAccounts: Bool? /// The enrollment status of a specific account ID, including creation and last updated timestamps. public let items: [AccountEnrollmentStatus]? @@ -1005,15 +1010,18 @@ extension CostOptimizationHub { public let filter: Filter? /// The grouping of recommendations by a dimension. public let groupBy: String - /// The maximum number of recommendations that are returned for the request. + /// The maximum number of recommendations to be returned for the request. public let maxResults: Int? + /// Additional metrics to be returned for the request. The only valid value is savingsPercentage. + public let metrics: [SummaryMetrics]? /// The token to retrieve the next set of results. public let nextToken: String? - public init(filter: Filter? = nil, groupBy: String, maxResults: Int? = nil, nextToken: String? = nil) { + public init(filter: Filter? = nil, groupBy: String, maxResults: Int? = nil, metrics: [SummaryMetrics]? = nil, nextToken: String? 
= nil) { self.filter = filter self.groupBy = groupBy self.maxResults = maxResults + self.metrics = metrics self.nextToken = nextToken } @@ -1021,12 +1029,15 @@ extension CostOptimizationHub { try self.filter?.validate(name: "\(name).filter") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 0) + try self.validate(self.metrics, name: "metrics", parent: name, max: 100) + try self.validate(self.metrics, name: "metrics", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { case filter = "filter" case groupBy = "groupBy" case maxResults = "maxResults" + case metrics = "metrics" case nextToken = "nextToken" } } @@ -1038,16 +1049,19 @@ extension CostOptimizationHub { public let estimatedTotalDedupedSavings: Double? /// The dimension used to group the recommendations by. public let groupBy: String? - /// List of all savings recommendations. + /// A list of all savings recommendations. public let items: [RecommendationSummary]? + /// The results or descriptions for the additional metrics, based on whether the metrics were or were not requested. + public let metrics: SummaryMetricsResult? /// The token to retrieve the next set of results. public let nextToken: String? - public init(currencyCode: String? = nil, estimatedTotalDedupedSavings: Double? = nil, groupBy: String? = nil, items: [RecommendationSummary]? = nil, nextToken: String? = nil) { + public init(currencyCode: String? = nil, estimatedTotalDedupedSavings: Double? = nil, groupBy: String? = nil, items: [RecommendationSummary]? = nil, metrics: SummaryMetricsResult? = nil, nextToken: String? 
= nil) { self.currencyCode = currencyCode self.estimatedTotalDedupedSavings = estimatedTotalDedupedSavings self.groupBy = groupBy self.items = items + self.metrics = metrics self.nextToken = nextToken } @@ -1056,6 +1070,7 @@ extension CostOptimizationHub { case estimatedTotalDedupedSavings = "estimatedTotalDedupedSavings" case groupBy = "groupBy" case items = "items" + case metrics = "metrics" case nextToken = "nextToken" } } @@ -1736,6 +1751,19 @@ extension CostOptimizationHub { } } + public struct SummaryMetricsResult: AWSDecodableShape { + /// The savings percentage based on your Amazon Web Services spend over the past 30 days. Savings percentage is only supported when filtering by Region, account ID, or tags. + public let savingsPercentage: String? + + public init(savingsPercentage: String? = nil) { + self.savingsPercentage = savingsPercentage + } + + private enum CodingKeys: String, CodingKey { + case savingsPercentage = "savingsPercentage" + } + } + public struct Tag: AWSEncodableShape & AWSDecodableShape { /// The key that's associated with the tag. public let key: String? @@ -1754,7 +1782,7 @@ extension CostOptimizationHub { } public struct UpdateEnrollmentStatusRequest: AWSEncodableShape { - /// Indicates whether to enroll member accounts of the organization if the account is the management account. + /// Indicates whether to enroll member accounts of the organization if the account is the management account or delegated administrator. public let includeMemberAccounts: Bool? /// Sets the account status. 
public let status: EnrollmentStatus diff --git a/Sources/Soto/Services/DataZone/DataZone_api.swift b/Sources/Soto/Services/DataZone/DataZone_api.swift index 46b7fd3890..8496324d28 100644 --- a/Sources/Soto/Services/DataZone/DataZone_api.swift +++ b/Sources/Soto/Services/DataZone/DataZone_api.swift @@ -72,12 +72,9 @@ public struct DataZone: AWSService { /// custom endpoints for regions static var serviceEndpoints: [String: String] {[ - "af-south-1": "datazone.af-south-1.api.aws", - "ap-east-1": "datazone.ap-east-1.api.aws", "ap-northeast-1": "datazone.ap-northeast-1.api.aws", "ap-northeast-2": "datazone.ap-northeast-2.api.aws", "ap-northeast-3": "datazone.ap-northeast-3.api.aws", - "ap-south-1": "datazone.ap-south-1.api.aws", "ap-south-2": "datazone.ap-south-2.api.aws", "ap-southeast-1": "datazone.ap-southeast-1.api.aws", "ap-southeast-2": "datazone.ap-southeast-2.api.aws", @@ -88,10 +85,8 @@ public struct DataZone: AWSService { "cn-north-1": "datazone.cn-north-1.api.amazonwebservices.com.cn", "cn-northwest-1": "datazone.cn-northwest-1.api.amazonwebservices.com.cn", "eu-central-1": "datazone.eu-central-1.api.aws", - "eu-central-2": "datazone.eu-central-2.api.aws", "eu-north-1": "datazone.eu-north-1.api.aws", "eu-south-1": "datazone.eu-south-1.api.aws", - "eu-south-2": "datazone.eu-south-2.api.aws", "eu-west-1": "datazone.eu-west-1.api.aws", "eu-west-2": "datazone.eu-west-2.api.aws", "eu-west-3": "datazone.eu-west-3.api.aws", @@ -111,12 +106,9 @@ public struct DataZone: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.fips]: .init(endpoints: [ - "af-south-1": "datazone-fips.af-south-1.api.aws", - "ap-east-1": "datazone-fips.ap-east-1.api.aws", "ap-northeast-1": "datazone-fips.ap-northeast-1.api.aws", "ap-northeast-2": "datazone-fips.ap-northeast-2.api.aws", "ap-northeast-3": "datazone-fips.ap-northeast-3.api.aws", - "ap-south-1": "datazone-fips.ap-south-1.api.aws", "ap-south-2": 
"datazone-fips.ap-south-2.api.aws", "ap-southeast-1": "datazone-fips.ap-southeast-1.api.aws", "ap-southeast-2": "datazone-fips.ap-southeast-2.api.aws", @@ -127,10 +119,8 @@ public struct DataZone: AWSService { "cn-north-1": "datazone-fips.cn-north-1.api.amazonwebservices.com.cn", "cn-northwest-1": "datazone-fips.cn-northwest-1.api.amazonwebservices.com.cn", "eu-central-1": "datazone-fips.eu-central-1.api.aws", - "eu-central-2": "datazone-fips.eu-central-2.api.aws", "eu-north-1": "datazone-fips.eu-north-1.api.aws", "eu-south-1": "datazone-fips.eu-south-1.api.aws", - "eu-south-2": "datazone-fips.eu-south-2.api.aws", "eu-west-1": "datazone-fips.eu-west-1.api.aws", "eu-west-2": "datazone-fips.eu-west-2.api.aws", "eu-west-3": "datazone-fips.eu-west-3.api.aws", @@ -227,6 +217,19 @@ public struct DataZone: AWSService { ) } + /// Creates a data asset filter. + @Sendable + public func createAssetFilter(_ input: CreateAssetFilterInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAssetFilterOutput { + return try await self.client.execute( + operation: "CreateAssetFilter", + path: "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a revision of the asset. @Sendable public func createAssetRevision(_ input: CreateAssetRevisionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAssetRevisionOutput { @@ -253,6 +256,32 @@ public struct DataZone: AWSService { ) } + /// Creates a data product. + @Sendable + public func createDataProduct(_ input: CreateDataProductInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataProductOutput { + return try await self.client.execute( + operation: "CreateDataProduct", + path: "/v2/domains/{domainIdentifier}/data-products", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a data product revision. 
+ @Sendable + public func createDataProductRevision(_ input: CreateDataProductRevisionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataProductRevisionOutput { + return try await self.client.execute( + operation: "CreateDataProductRevision", + path: "/v2/domains/{domainIdentifier}/data-products/{identifier}/revisions", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates an Amazon DataZone data source. @Sendable public func createDataSource(_ input: CreateDataSourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataSourceOutput { @@ -461,7 +490,7 @@ public struct DataZone: AWSService { ) } - /// Delets an asset in Amazon DataZone. + /// Deletes an asset in Amazon DataZone. @Sendable public func deleteAsset(_ input: DeleteAssetInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAssetOutput { return try await self.client.execute( @@ -474,6 +503,19 @@ public struct DataZone: AWSService { ) } + /// Deletes an asset filter. + @Sendable + public func deleteAssetFilter(_ input: DeleteAssetFilterInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteAssetFilter", + path: "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes an asset type in Amazon DataZone. @Sendable public func deleteAssetType(_ input: DeleteAssetTypeInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAssetTypeOutput { @@ -487,6 +529,19 @@ public struct DataZone: AWSService { ) } + /// Deletes a data product in Amazon DataZone. 
+ @Sendable + public func deleteDataProduct(_ input: DeleteDataProductInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteDataProductOutput { + return try await self.client.execute( + operation: "DeleteDataProduct", + path: "/v2/domains/{domainIdentifier}/data-products/{identifier}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes a data source in Amazon DataZone. @Sendable public func deleteDataSource(_ input: DeleteDataSourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteDataSourceOutput { @@ -721,6 +776,19 @@ public struct DataZone: AWSService { ) } + /// Gets an asset filter. + @Sendable + public func getAssetFilter(_ input: GetAssetFilterInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAssetFilterOutput { + return try await self.client.execute( + operation: "GetAssetFilter", + path: "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets an Amazon DataZone asset type. @Sendable public func getAssetType(_ input: GetAssetTypeInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAssetTypeOutput { @@ -734,6 +802,19 @@ public struct DataZone: AWSService { ) } + /// Gets the data product. + @Sendable + public func getDataProduct(_ input: GetDataProductInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataProductOutput { + return try await self.client.execute( + operation: "GetDataProduct", + path: "/v2/domains/{domainIdentifier}/data-products/{identifier}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets an Amazon DataZone data source. 
@Sendable public func getDataSource(_ input: GetDataSourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataSourceOutput { @@ -825,6 +906,19 @@ public struct DataZone: AWSService { ) } + /// Gets the credentials of an environment in Amazon DataZone. + @Sendable + public func getEnvironmentCredentials(_ input: GetEnvironmentCredentialsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetEnvironmentCredentialsOutput { + return try await self.client.execute( + operation: "GetEnvironmentCredentials", + path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/credentials", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets an evinronment profile in Amazon DataZone. @Sendable public func getEnvironmentProfile(_ input: GetEnvironmentProfileInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetEnvironmentProfileOutput { @@ -916,7 +1010,7 @@ public struct DataZone: AWSService { ) } - /// Gets a listing (a record of an asset at a given time). + /// Gets a listing (a record of an asset at a given time). If you specify a listing version, only details that are specific to that version are returned. @Sendable public func getListing(_ input: GetListingInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetListingOutput { return try await self.client.execute( @@ -1033,6 +1127,19 @@ public struct DataZone: AWSService { ) } + /// Lists asset filters. + @Sendable + public func listAssetFilters(_ input: ListAssetFiltersInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAssetFiltersOutput { + return try await self.client.execute( + operation: "ListAssetFilters", + path: "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists the revisions for the asset. 
@Sendable public func listAssetRevisions(_ input: ListAssetRevisionsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAssetRevisionsOutput { @@ -1046,6 +1153,19 @@ public struct DataZone: AWSService { ) } + /// Lists data product revisions. + @Sendable + public func listDataProductRevisions(_ input: ListDataProductRevisionsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDataProductRevisionsOutput { + return try await self.client.execute( + operation: "ListDataProductRevisions", + path: "/v2/domains/{domainIdentifier}/data-products/{identifier}/revisions", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists data source run activities. @Sendable public func listDataSourceRunActivities(_ input: ListDataSourceRunActivitiesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDataSourceRunActivitiesOutput { @@ -1501,6 +1621,19 @@ public struct DataZone: AWSService { ) } + /// Updates an asset filter. + @Sendable + public func updateAssetFilter(_ input: UpdateAssetFilterInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateAssetFilterOutput { + return try await self.client.execute( + operation: "UpdateAssetFilter", + path: "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates the specified data source in Amazon DataZone. @Sendable public func updateDataSource(_ input: UpdateDataSourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateDataSourceOutput { @@ -1684,6 +1817,44 @@ extension DataZone { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension DataZone { + /// Lists asset filters. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listAssetFiltersPaginator( + _ input: ListAssetFiltersInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listAssetFilters, + inputKey: \ListAssetFiltersInput.nextToken, + outputKey: \ListAssetFiltersOutput.nextToken, + logger: logger + ) + } + + /// Lists data product revisions. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listDataProductRevisionsPaginator( + _ input: ListDataProductRevisionsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listDataProductRevisions, + inputKey: \ListDataProductRevisionsInput.nextToken, + outputKey: \ListDataProductRevisionsOutput.nextToken, + logger: logger + ) + } + /// Lists data source run activities. /// Return PaginatorSequence for operation. 
/// @@ -2141,6 +2312,29 @@ extension DataZone { } } +extension DataZone.ListAssetFiltersInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListAssetFiltersInput { + return .init( + assetIdentifier: self.assetIdentifier, + domainIdentifier: self.domainIdentifier, + maxResults: self.maxResults, + nextToken: token, + status: self.status + ) + } +} + +extension DataZone.ListDataProductRevisionsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListDataProductRevisionsInput { + return .init( + domainIdentifier: self.domainIdentifier, + identifier: self.identifier, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension DataZone.ListDataSourceRunActivitiesInput: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> DataZone.ListDataSourceRunActivitiesInput { return .init( @@ -2331,6 +2525,7 @@ extension DataZone.ListSubscriptionGrantsInput: AWSPaginateToken { environmentId: self.environmentId, maxResults: self.maxResults, nextToken: token, + owningProjectId: self.owningProjectId, sortBy: self.sortBy, sortOrder: self.sortOrder, subscribedListingId: self.subscribedListingId, diff --git a/Sources/Soto/Services/DataZone/DataZone_shapes.swift b/Sources/Soto/Services/DataZone/DataZone_shapes.swift index 384122b6f7..9076b49739 100644 --- a/Sources/Soto/Services/DataZone/DataZone_shapes.swift +++ b/Sources/Soto/Services/DataZone/DataZone_shapes.swift @@ -62,6 +62,18 @@ extension DataZone { public var description: String { return self.rawValue } } + public enum DataProductItemType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case asset = "ASSET" + public var description: String { return self.rawValue } + } + + public enum DataProductStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case createFailed = "CREATE_FAILED" + case created = "CREATED" + case creating = "CREATING" + public var description: 
String { return self.rawValue } + } + public enum DataSourceErrorType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case accessDeniedException = "ACCESS_DENIED_EXCEPTION" case conflictException = "CONFLICT_EXCEPTION" @@ -139,6 +151,7 @@ extension DataZone { public enum EntityType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case asset = "ASSET" + case dataProduct = "DATA_PRODUCT" public var description: String { return self.rawValue } } @@ -165,6 +178,12 @@ extension DataZone { public var description: String { return self.rawValue } } + public enum FilterStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case invalid = "INVALID" + case valid = "VALID" + public var description: String { return self.rawValue } + } + public enum FormTypeStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" @@ -197,6 +216,7 @@ extension DataZone { public enum InventorySearchScope: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case asset = "ASSET" + case dataProduct = "DATA_PRODUCT" case glossary = "GLOSSARY" case glossaryTerm = "GLOSSARY_TERM" public var description: String { return self.rawValue } @@ -460,6 +480,47 @@ extension DataZone { public var description: String { return self.rawValue } } + public enum AssetFilterConfiguration: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The column configuration of the asset filter. + case columnConfiguration(ColumnFilterConfiguration) + /// The row configuration of the asset filter. 
+ case rowConfiguration(RowFilterConfiguration) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .columnConfiguration: + let value = try container.decode(ColumnFilterConfiguration.self, forKey: .columnConfiguration) + self = .columnConfiguration(value) + case .rowConfiguration: + let value = try container.decode(RowFilterConfiguration.self, forKey: .rowConfiguration) + self = .rowConfiguration(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .columnConfiguration(let value): + try container.encode(value, forKey: .columnConfiguration) + case .rowConfiguration(let value): + try container.encode(value, forKey: .rowConfiguration) + } + } + + private enum CodingKeys: String, CodingKey { + case columnConfiguration = "columnConfiguration" + case rowConfiguration = "rowConfiguration" + } + } + public enum DataSourceConfigurationInput: AWSEncodableShape, Sendable { /// The configuration of the Amazon Web Services Glue data source. case glueRunConfiguration(GlueRunConfigurationInput) @@ -559,6 +620,37 @@ extension DataZone { } } + public enum ListingItem: AWSDecodableShape, Sendable { + /// An asset published in an Amazon DataZone catalog. + case assetListing(AssetListing) + /// The data product listing. 
+ case dataProductListing(DataProductListing) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .assetListing: + let value = try container.decode(AssetListing.self, forKey: .assetListing) + self = .assetListing(value) + case .dataProductListing: + let value = try container.decode(DataProductListing.self, forKey: .dataProductListing) + self = .dataProductListing(value) + } + } + + private enum CodingKeys: String, CodingKey { + case assetListing = "assetListing" + case dataProductListing = "dataProductListing" + } + } + public enum Member: AWSEncodableShape, Sendable { /// The ID of the group of a project member. case groupIdentifier(String) @@ -653,11 +745,181 @@ extension DataZone { } } + public enum RowFilter: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The 'and' clause of the row filter. + case and([RowFilter]) + /// The expression of the row filter. + case expression(RowFilterExpression) + /// The 'or' clause of the row filter. 
+ case or([RowFilter]) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .and: + let value = try container.decode([RowFilter].self, forKey: .and) + self = .and(value) + case .expression: + let value = try container.decode(RowFilterExpression.self, forKey: .expression) + self = .expression(value) + case .or: + let value = try container.decode([RowFilter].self, forKey: .or) + self = .or(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .and(let value): + try container.encode(value, forKey: .and) + case .expression(let value): + try container.encode(value, forKey: .expression) + case .or(let value): + try container.encode(value, forKey: .or) + } + } + + private enum CodingKeys: String, CodingKey { + case and = "and" + case expression = "expression" + case or = "or" + } + } + + public enum RowFilterExpression: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The 'equal to' clause of the row filter expression. + case equalTo(EqualToExpression) + /// The 'greater than' clause of the row filter expression. + case greaterThan(GreaterThanExpression) + /// The 'greater than or equal to' clause of the filter expression. + case greaterThanOrEqualTo(GreaterThanOrEqualToExpression) + /// The 'in' clause of the row filter expression. + case `in`(InExpression) + /// The 'is not null' clause of the row filter expression. + case isNotNull(IsNotNullExpression) + /// The 'is null' clause of the row filter expression. + case isNull(IsNullExpression) + /// The 'less than' clause of the row filter expression. 
+ case lessThan(LessThanExpression) + /// The 'less than or equal to' clause of the row filter expression. + case lessThanOrEqualTo(LessThanOrEqualToExpression) + /// The 'like' clause of the row filter expression. + case like(LikeExpression) + /// The 'not equal to' clause of the row filter expression. + case notEqualTo(NotEqualToExpression) + /// The 'not in' clause of the row filter expression. + case notIn(NotInExpression) + /// The 'not like' clause of the row filter expression. + case notLike(NotLikeExpression) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .equalTo: + let value = try container.decode(EqualToExpression.self, forKey: .equalTo) + self = .equalTo(value) + case .greaterThan: + let value = try container.decode(GreaterThanExpression.self, forKey: .greaterThan) + self = .greaterThan(value) + case .greaterThanOrEqualTo: + let value = try container.decode(GreaterThanOrEqualToExpression.self, forKey: .greaterThanOrEqualTo) + self = .greaterThanOrEqualTo(value) + case .`in`: + let value = try container.decode(InExpression.self, forKey: .`in`) + self = .`in`(value) + case .isNotNull: + let value = try container.decode(IsNotNullExpression.self, forKey: .isNotNull) + self = .isNotNull(value) + case .isNull: + let value = try container.decode(IsNullExpression.self, forKey: .isNull) + self = .isNull(value) + case .lessThan: + let value = try container.decode(LessThanExpression.self, forKey: .lessThan) + self = .lessThan(value) + case .lessThanOrEqualTo: + let value = try container.decode(LessThanOrEqualToExpression.self, forKey: .lessThanOrEqualTo) + self = .lessThanOrEqualTo(value) + 
case .like: + let value = try container.decode(LikeExpression.self, forKey: .like) + self = .like(value) + case .notEqualTo: + let value = try container.decode(NotEqualToExpression.self, forKey: .notEqualTo) + self = .notEqualTo(value) + case .notIn: + let value = try container.decode(NotInExpression.self, forKey: .notIn) + self = .notIn(value) + case .notLike: + let value = try container.decode(NotLikeExpression.self, forKey: .notLike) + self = .notLike(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .equalTo(let value): + try container.encode(value, forKey: .equalTo) + case .greaterThan(let value): + try container.encode(value, forKey: .greaterThan) + case .greaterThanOrEqualTo(let value): + try container.encode(value, forKey: .greaterThanOrEqualTo) + case .`in`(let value): + try container.encode(value, forKey: .`in`) + case .isNotNull(let value): + try container.encode(value, forKey: .isNotNull) + case .isNull(let value): + try container.encode(value, forKey: .isNull) + case .lessThan(let value): + try container.encode(value, forKey: .lessThan) + case .lessThanOrEqualTo(let value): + try container.encode(value, forKey: .lessThanOrEqualTo) + case .like(let value): + try container.encode(value, forKey: .like) + case .notEqualTo(let value): + try container.encode(value, forKey: .notEqualTo) + case .notIn(let value): + try container.encode(value, forKey: .notIn) + case .notLike(let value): + try container.encode(value, forKey: .notLike) + } + } + + private enum CodingKeys: String, CodingKey { + case equalTo = "equalTo" + case greaterThan = "greaterThan" + case greaterThanOrEqualTo = "greaterThanOrEqualTo" + case `in` = "in" + case isNotNull = "isNotNull" + case isNull = "isNull" + case lessThan = "lessThan" + case lessThanOrEqualTo = "lessThanOrEqualTo" + case like = "like" + case notEqualTo = "notEqualTo" + case notIn = "notIn" + case notLike = "notLike" + } + } 
+ public enum SearchInventoryResultItem: AWSDecodableShape, Sendable { /// The asset item included in the search results. case assetItem(AssetItem) - /// The data product item included in the search results. - case dataProductItem(DataProductSummary) + /// The data product. + case dataProductItem(DataProductResultItem) /// The glossary item included in the search results. case glossaryItem(GlossaryItem) /// The glossary term item included in the search results. @@ -677,7 +939,7 @@ extension DataZone { let value = try container.decode(AssetItem.self, forKey: .assetItem) self = .assetItem(value) case .dataProductItem: - let value = try container.decode(DataProductSummary.self, forKey: .dataProductItem) + let value = try container.decode(DataProductResultItem.self, forKey: .dataProductItem) self = .dataProductItem(value) case .glossaryItem: let value = try container.decode(GlossaryItem.self, forKey: .glossaryItem) @@ -696,6 +958,37 @@ extension DataZone { } } + public enum SearchResultItem: AWSDecodableShape, Sendable { + /// The asset listing included in the results of the SearchListings action. + case assetListing(AssetListingItem) + /// The data product listing. 
+ case dataProductListing(DataProductListingItem) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .assetListing: + let value = try container.decode(AssetListingItem.self, forKey: .assetListing) + self = .assetListing(value) + case .dataProductListing: + let value = try container.decode(DataProductListingItem.self, forKey: .dataProductListing) + self = .dataProductListing(value) + } + } + + private enum CodingKeys: String, CodingKey { + case assetListing = "assetListing" + case dataProductListing = "dataProductListing" + } + } + public enum SearchTypesResultItem: AWSDecodableShape, Sendable { /// The asset type included in the results of the SearchTypes action. case assetTypeItem(AssetTypeItem) @@ -764,6 +1057,37 @@ extension DataZone { } } + public enum SubscribedListingItem: AWSDecodableShape, Sendable { + /// The asset for which the subscription grant is created. + case assetListing(SubscribedAssetListing) + /// The data product listing. 
+ case productListing(SubscribedProductListing) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .assetListing: + let value = try container.decode(SubscribedAssetListing.self, forKey: .assetListing) + self = .assetListing(value) + case .productListing: + let value = try container.decode(SubscribedProductListing.self, forKey: .productListing) + self = .productListing(value) + } + } + + private enum CodingKeys: String, CodingKey { + case assetListing = "assetListing" + case productListing = "productListing" + } + } + public enum UserProfileDetails: AWSDecodableShape, Sendable { /// The IAM details included in the user profile details. case iam(IamUserProfileDetails) @@ -1006,6 +1330,76 @@ extension DataZone { } } + public struct AssetFilterSummary: AWSDecodableShape { + /// The ID of the data asset. + public let assetId: String + /// The timestamp at which the asset filter was created. + public let createdAt: Date? + /// The description of the asset filter. + public let description: String? + /// The ID of the domain where the asset filter lives. + public let domainId: String + /// The effective column names of the asset filter. + public let effectiveColumnNames: [String]? + /// The effective row filter of the asset filter. + public let effectiveRowFilter: String? + /// The error message that is displayed if the action does not succeed. + public let errorMessage: String? + /// The ID of the asset filter. + public let id: String + /// The name of the asset filter. + public let name: String + /// The status of the asset filter. + public let status: FilterStatus? 
+ + public init(assetId: String, createdAt: Date? = nil, description: String? = nil, domainId: String, effectiveColumnNames: [String]? = nil, effectiveRowFilter: String? = nil, errorMessage: String? = nil, id: String, name: String, status: FilterStatus? = nil) { + self.assetId = assetId + self.createdAt = createdAt + self.description = description + self.domainId = domainId + self.effectiveColumnNames = effectiveColumnNames + self.effectiveRowFilter = effectiveRowFilter + self.errorMessage = errorMessage + self.id = id + self.name = name + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case assetId = "assetId" + case createdAt = "createdAt" + case description = "description" + case domainId = "domainId" + case effectiveColumnNames = "effectiveColumnNames" + case effectiveRowFilter = "effectiveRowFilter" + case errorMessage = "errorMessage" + case id = "id" + case name = "name" + case status = "status" + } + } + + public struct AssetInDataProductListingItem: AWSDecodableShape { + /// The entity ID of the listing of the asset in a data product. + public let entityId: String? + /// The entity revision of the listing of the asset in a data product. + public let entityRevision: String? + /// The entity type of the listing of the asset in a data product. + public let entityType: String? + + public init(entityId: String? = nil, entityRevision: String? = nil, entityType: String? = nil) { + self.entityId = entityId + self.entityRevision = entityRevision + self.entityType = entityType + } + + private enum CodingKeys: String, CodingKey { + case entityId = "entityId" + case entityRevision = "entityRevision" + case entityType = "entityType" + } + } + public struct AssetItem: AWSDecodableShape { /// The additional attributes of a Amazon DataZone inventory asset. public let additionalAttributes: AssetItemAdditionalAttributes? 
@@ -1517,6 +1911,19 @@ extension DataZone { } } + public struct ColumnFilterConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Specifies whether to include column names. + public let includedColumnNames: [String]? + + public init(includedColumnNames: [String]? = nil) { + self.includedColumnNames = includedColumnNames + } + + private enum CodingKeys: String, CodingKey { + case includedColumnNames = "includedColumnNames" + } + } + public struct ConfigurableActionParameter: AWSDecodableShape { /// The key of the configurable action parameter. public let key: String? @@ -1555,78 +1962,182 @@ extension DataZone { } } - public struct CreateAssetInput: AWSEncodableShape { + public struct CreateAssetFilterInput: AWSEncodableShape { + /// The ID of the data asset. + public let assetIdentifier: String /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. public let clientToken: String? - /// Asset description. + /// The configuration of the asset filter. + public let configuration: AssetFilterConfiguration + /// The description of the asset filter. public let description: String? - /// Amazon DataZone domain where the asset is created. + /// The ID of the domain in which you want to create an asset filter. public let domainIdentifier: String - /// The external identifier of the asset. - public let externalIdentifier: String? - /// Metadata forms attached to the asset. - public let formsInput: [FormInput]? - /// Glossary terms attached to the asset. - public let glossaryTerms: [String]? - /// Asset name. + /// The name of the asset filter. public let name: String - /// The unique identifier of the project that owns this asset. - public let owningProjectIdentifier: String - /// The configuration of the automatically generated business-friendly metadata for the asset. - public let predictionConfiguration: PredictionConfiguration? - /// The unique identifier of this asset's type. 
- public let typeIdentifier: String - /// The revision of this asset's type. - public let typeRevision: String? - public init(clientToken: String? = CreateAssetInput.idempotencyToken(), description: String? = nil, domainIdentifier: String, externalIdentifier: String? = nil, formsInput: [FormInput]? = nil, glossaryTerms: [String]? = nil, name: String, owningProjectIdentifier: String, predictionConfiguration: PredictionConfiguration? = nil, typeIdentifier: String, typeRevision: String? = nil) { + public init(assetIdentifier: String, clientToken: String? = CreateAssetFilterInput.idempotencyToken(), configuration: AssetFilterConfiguration, description: String? = nil, domainIdentifier: String, name: String) { + self.assetIdentifier = assetIdentifier self.clientToken = clientToken + self.configuration = configuration self.description = description self.domainIdentifier = domainIdentifier - self.externalIdentifier = externalIdentifier - self.formsInput = formsInput - self.glossaryTerms = glossaryTerms self.name = name - self.owningProjectIdentifier = owningProjectIdentifier - self.predictionConfiguration = predictionConfiguration - self.typeIdentifier = typeIdentifier - self.typeRevision = typeRevision } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.assetIdentifier, key: "assetIdentifier") try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encode(self.configuration, forKey: .configuration) try container.encodeIfPresent(self.description, forKey: .description) request.encodePath(self.domainIdentifier, key: "domainIdentifier") - try container.encodeIfPresent(self.externalIdentifier, forKey: .externalIdentifier) - try container.encodeIfPresent(self.formsInput, forKey: .formsInput) - try container.encodeIfPresent(self.glossaryTerms, forKey: .glossaryTerms) try container.encode(self.name, forKey: .name) - try container.encode(self.owningProjectIdentifier, forKey: .owningProjectIdentifier) - try container.encodeIfPresent(self.predictionConfiguration, forKey: .predictionConfiguration) - try container.encode(self.typeIdentifier, forKey: .typeIdentifier) - try container.encodeIfPresent(self.typeRevision, forKey: .typeRevision) } public func validate(name: String) throws { - try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) - try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) - try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") + try self.validate(self.assetIdentifier, name: "assetIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") try self.validate(self.description, name: "description", parent: name, max: 2048) try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") - try self.validate(self.externalIdentifier, name: "externalIdentifier", parent: name, max: 256) - try self.validate(self.externalIdentifier, name: "externalIdentifier", parent: name, min: 1) - try self.formsInput?.forEach { - try $0.validate(name: "\(name).formsInput[]") - } - try self.validate(self.formsInput, name: "formsInput", 
parent: name, max: 10) - try self.glossaryTerms?.forEach { - try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") - } - try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 20) - try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\w -]+$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case configuration = "configuration" + case description = "description" + case name = "name" + } + } + + public struct CreateAssetFilterOutput: AWSDecodableShape { + /// The ID of the asset. + public let assetId: String + /// The configuration of the asset filter. + public let configuration: AssetFilterConfiguration + /// The timestamp at which the asset filter was created. + public let createdAt: Date? + /// The description of the asset filter. + public let description: String? + /// The ID of the domain where the asset filter is created. + public let domainId: String + /// The column names in the asset filter. + public let effectiveColumnNames: [String]? + /// The row filter in the asset filter. + public let effectiveRowFilter: String? + /// The error message that is displayed if the asset filter is not created successfully. + public let errorMessage: String? + /// The ID of the asset filter. + public let id: String + /// The name of the asset filter. + public let name: String + /// The status of the asset filter. + public let status: FilterStatus? + + public init(assetId: String, configuration: AssetFilterConfiguration, createdAt: Date? = nil, description: String? = nil, domainId: String, effectiveColumnNames: [String]? = nil, effectiveRowFilter: String? 
= nil, errorMessage: String? = nil, id: String, name: String, status: FilterStatus? = nil) { + self.assetId = assetId + self.configuration = configuration + self.createdAt = createdAt + self.description = description + self.domainId = domainId + self.effectiveColumnNames = effectiveColumnNames + self.effectiveRowFilter = effectiveRowFilter + self.errorMessage = errorMessage + self.id = id + self.name = name + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case assetId = "assetId" + case configuration = "configuration" + case createdAt = "createdAt" + case description = "description" + case domainId = "domainId" + case effectiveColumnNames = "effectiveColumnNames" + case effectiveRowFilter = "effectiveRowFilter" + case errorMessage = "errorMessage" + case id = "id" + case name = "name" + case status = "status" + } + } + + public struct CreateAssetInput: AWSEncodableShape { + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// Asset description. + public let description: String? + /// Amazon DataZone domain where the asset is created. + public let domainIdentifier: String + /// The external identifier of the asset. + public let externalIdentifier: String? + /// Metadata forms attached to the asset. + public let formsInput: [FormInput]? + /// Glossary terms attached to the asset. + public let glossaryTerms: [String]? + /// Asset name. + public let name: String + /// The unique identifier of the project that owns this asset. + public let owningProjectIdentifier: String + /// The configuration of the automatically generated business-friendly metadata for the asset. + public let predictionConfiguration: PredictionConfiguration? + /// The unique identifier of this asset's type. + public let typeIdentifier: String + /// The revision of this asset's type. + public let typeRevision: String? + + public init(clientToken: String? 
= CreateAssetInput.idempotencyToken(), description: String? = nil, domainIdentifier: String, externalIdentifier: String? = nil, formsInput: [FormInput]? = nil, glossaryTerms: [String]? = nil, name: String, owningProjectIdentifier: String, predictionConfiguration: PredictionConfiguration? = nil, typeIdentifier: String, typeRevision: String? = nil) { + self.clientToken = clientToken + self.description = description + self.domainIdentifier = domainIdentifier + self.externalIdentifier = externalIdentifier + self.formsInput = formsInput + self.glossaryTerms = glossaryTerms + self.name = name + self.owningProjectIdentifier = owningProjectIdentifier + self.predictionConfiguration = predictionConfiguration + self.typeIdentifier = typeIdentifier + self.typeRevision = typeRevision + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + try container.encodeIfPresent(self.externalIdentifier, forKey: .externalIdentifier) + try container.encodeIfPresent(self.formsInput, forKey: .formsInput) + try container.encodeIfPresent(self.glossaryTerms, forKey: .glossaryTerms) + try container.encode(self.name, forKey: .name) + try container.encode(self.owningProjectIdentifier, forKey: .owningProjectIdentifier) + try container.encodeIfPresent(self.predictionConfiguration, forKey: .predictionConfiguration) + try container.encode(self.typeIdentifier, forKey: .typeIdentifier) + try container.encodeIfPresent(self.typeRevision, forKey: .typeRevision) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: 
name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.externalIdentifier, name: "externalIdentifier", parent: name, max: 256) + try self.validate(self.externalIdentifier, name: "externalIdentifier", parent: name, min: 1) + try self.formsInput?.forEach { + try $0.validate(name: "\(name).formsInput[]") + } + try self.validate(self.formsInput, name: "formsInput", parent: name, max: 10) + try self.glossaryTerms?.forEach { + try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 20) + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 256) try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.owningProjectIdentifier, name: "owningProjectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") try self.validate(self.typeIdentifier, name: "typeIdentifier", parent: name, max: 513) @@ -1775,17 +2286,289 @@ extension DataZone { request.encodePath(self.domainIdentifier, key: "domainIdentifier") try container.encodeIfPresent(self.formsInput, forKey: .formsInput) try container.encodeIfPresent(self.glossaryTerms, forKey: .glossaryTerms) - request.encodePath(self.identifier, key: "identifier") + request.encodePath(self.identifier, key: "identifier") + try container.encode(self.name, forKey: .name) + try container.encodeIfPresent(self.predictionConfiguration, forKey: .predictionConfiguration) + try container.encodeIfPresent(self.typeRevision, forKey: .typeRevision) + } + + public func validate(name: String) throws { + try 
self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.formsInput?.forEach { + try $0.validate(name: "\(name).formsInput[]") + } + try self.validate(self.formsInput, name: "formsInput", parent: name, max: 10) + try self.glossaryTerms?.forEach { + try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 20) + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1) + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.typeRevision, name: "typeRevision", parent: name, max: 64) + try self.validate(self.typeRevision, name: "typeRevision", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case description = "description" + case formsInput = "formsInput" + case glossaryTerms = "glossaryTerms" + case name = "name" + case predictionConfiguration = "predictionConfiguration" + case typeRevision = "typeRevision" + } + } + + public struct CreateAssetRevisionOutput: AWSDecodableShape { + /// The timestamp of when the asset revision occurred. + public let createdAt: Date? + /// The Amazon DataZone user who performed the asset revision. + public let createdBy: String? + /// The revised asset description. + public let description: String?
+ /// The unique identifier of the Amazon DataZone domain where the asset was revised. + public let domainId: String + /// The external identifier of the asset. + public let externalIdentifier: String? + /// The timestamp of when the first asset revision occurred. + public let firstRevisionCreatedAt: Date? + /// The Amazon DataZone user who performed the first asset revision. + public let firstRevisionCreatedBy: String? + /// The metadata forms that were attached to the asset as part of the asset revision. + public let formsOutput: [FormOutput] + /// The glossary terms that were attached to the asset as part of asset revision. + public let glossaryTerms: [String]? + /// The unique identifier of the asset revision. + public let id: String + /// The latest data point that was imported into the time series form for the asset. + public let latestTimeSeriesDataPointFormsOutput: [TimeSeriesDataPointSummaryFormOutput]? + /// The details of an asset published in an Amazon DataZone catalog. + public let listing: AssetListingDetails? + /// The revised name of the asset. + public let name: String + /// The unique identifier of the revised project that owns the asset. + public let owningProjectId: String + /// The configuration of the automatically generated business-friendly metadata for the asset. + public let predictionConfiguration: PredictionConfiguration? + /// The read-only metadata forms that were attached to the asset as part of the asset revision. + public let readOnlyFormsOutput: [FormOutput]? + /// The revision of the asset. + public let revision: String + /// The identifier of the revision type. + public let typeIdentifier: String + /// The revision type of the asset. + public let typeRevision: String + + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, externalIdentifier: String? = nil, firstRevisionCreatedAt: Date? = nil, firstRevisionCreatedBy: String?
= nil, formsOutput: [FormOutput], glossaryTerms: [String]? = nil, id: String, latestTimeSeriesDataPointFormsOutput: [TimeSeriesDataPointSummaryFormOutput]? = nil, listing: AssetListingDetails? = nil, name: String, owningProjectId: String, predictionConfiguration: PredictionConfiguration? = nil, readOnlyFormsOutput: [FormOutput]? = nil, revision: String, typeIdentifier: String, typeRevision: String) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.externalIdentifier = externalIdentifier + self.firstRevisionCreatedAt = firstRevisionCreatedAt + self.firstRevisionCreatedBy = firstRevisionCreatedBy + self.formsOutput = formsOutput + self.glossaryTerms = glossaryTerms + self.id = id + self.latestTimeSeriesDataPointFormsOutput = latestTimeSeriesDataPointFormsOutput + self.listing = listing + self.name = name + self.owningProjectId = owningProjectId + self.predictionConfiguration = predictionConfiguration + self.readOnlyFormsOutput = readOnlyFormsOutput + self.revision = revision + self.typeIdentifier = typeIdentifier + self.typeRevision = typeRevision + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case externalIdentifier = "externalIdentifier" + case firstRevisionCreatedAt = "firstRevisionCreatedAt" + case firstRevisionCreatedBy = "firstRevisionCreatedBy" + case formsOutput = "formsOutput" + case glossaryTerms = "glossaryTerms" + case id = "id" + case latestTimeSeriesDataPointFormsOutput = "latestTimeSeriesDataPointFormsOutput" + case listing = "listing" + case name = "name" + case owningProjectId = "owningProjectId" + case predictionConfiguration = "predictionConfiguration" + case readOnlyFormsOutput = "readOnlyFormsOutput" + case revision = "revision" + case typeIdentifier = "typeIdentifier" + case typeRevision = "typeRevision" + } + } + + public struct 
CreateAssetTypeInput: AWSEncodableShape { + /// The description of the custom asset type. + public let description: String? + /// The unique identifier of the Amazon DataZone domain where the custom asset type is being created. + public let domainIdentifier: String + /// The metadata forms that are to be attached to the custom asset type. + public let formsInput: [String: FormEntryInput] + /// The name of the custom asset type. + public let name: String + /// The identifier of the Amazon DataZone project that is to own the custom asset type. + public let owningProjectIdentifier: String + + public init(description: String? = nil, domainIdentifier: String, formsInput: [String: FormEntryInput], name: String, owningProjectIdentifier: String) { + self.description = description + self.domainIdentifier = domainIdentifier + self.formsInput = formsInput + self.name = name + self.owningProjectIdentifier = owningProjectIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as!
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + try container.encode(self.formsInput, forKey: .formsInput) + try container.encode(self.name, forKey: .name) + try container.encode(self.owningProjectIdentifier, forKey: .owningProjectIdentifier) + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.formsInput.forEach { + try validate($0.key, name: "formsInput.key", parent: name, max: 128) + try validate($0.key, name: "formsInput.key", parent: name, min: 1) + try validate($0.key, name: "formsInput.key", parent: name, pattern: "^(?![0-9_])\\w+$|^_\\w*[a-zA-Z0-9]\\w*$") + try $0.value.validate(name: "\(name).formsInput[\"\($0.key)\"]") + } + try self.validate(self.formsInput, name: "formsInput", parent: name, max: 10) + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[^\\.]*") + try self.validate(self.owningProjectIdentifier, name: "owningProjectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case formsInput = "formsInput" + case name = "name" + case owningProjectIdentifier = "owningProjectIdentifier" + } + } + + public struct CreateAssetTypeOutput: AWSDecodableShape { + /// The timestamp of when the asset type is to be created. + public let createdAt: Date? + /// The Amazon DataZone user who creates this custom asset type. + public let createdBy: String? + /// The description of the custom asset type. 
+ public let description: String? + /// The ID of the Amazon DataZone domain in which the asset type was created. + public let domainId: String + /// The metadata forms that are attached to the asset type. + public let formsOutput: [String: FormEntryOutput] + /// The name of the asset type. + public let name: String + /// The ID of the Amazon DataZone domain where the asset type was originally created. + public let originDomainId: String? + /// The ID of the Amazon DataZone project where the asset type was originally created. + public let originProjectId: String? + /// The ID of the Amazon DataZone project that currently owns this asset type. + public let owningProjectId: String? + /// The revision of the custom asset type. + public let revision: String + /// The timestamp of when the custom asset type was updated. + public let updatedAt: Date? + /// The Amazon DataZone user that updated the custom asset type. + public let updatedBy: String? + + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, formsOutput: [String: FormEntryOutput], name: String, originDomainId: String? = nil, originProjectId: String? = nil, owningProjectId: String? = nil, revision: String, updatedAt: Date? = nil, updatedBy: String?
= nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.formsOutput = formsOutput + self.name = name + self.originDomainId = originDomainId + self.originProjectId = originProjectId + self.owningProjectId = owningProjectId + self.revision = revision + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case formsOutput = "formsOutput" + case name = "name" + case originDomainId = "originDomainId" + case originProjectId = "originProjectId" + case owningProjectId = "owningProjectId" + case revision = "revision" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct CreateDataProductInput: AWSEncodableShape { + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The description of the data product. + public let description: String? + /// The ID of the domain where the data product is created. + public let domainIdentifier: String + /// The metadata forms of the data product. + public let formsInput: [FormInput]? + /// The glossary terms of the data product. + public let glossaryTerms: [String]? + /// The data assets of the data product. + public let items: [DataProductItem]? + /// The name of the data product. + public let name: String + /// The ID of the owning project of the data product. + public let owningProjectIdentifier: String + + public init(clientToken: String? = CreateDataProductInput.idempotencyToken(), description: String? = nil, domainIdentifier: String, formsInput: [FormInput]? = nil, glossaryTerms: [String]? = nil, items: [DataProductItem]? 
= nil, name: String, owningProjectIdentifier: String) { + self.clientToken = clientToken + self.description = description + self.domainIdentifier = domainIdentifier + self.formsInput = formsInput + self.glossaryTerms = glossaryTerms + self.items = items + self.name = name + self.owningProjectIdentifier = owningProjectIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + try container.encodeIfPresent(self.formsInput, forKey: .formsInput) + try container.encodeIfPresent(self.glossaryTerms, forKey: .glossaryTerms) + try container.encodeIfPresent(self.items, forKey: .items) try container.encode(self.name, forKey: .name) - try container.encodeIfPresent(self.predictionConfiguration, forKey: .predictionConfiguration) - try container.encodeIfPresent(self.typeRevision, forKey: .typeRevision) + try container.encode(self.owningProjectIdentifier, forKey: .owningProjectIdentifier) } public func validate(name: String) throws { try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") - try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.description, name: "description", parent: name, max: 4096) + try self.validate(self.description, name: "description", parent: name, min: 1) try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") try self.formsInput?.forEach { try $0.validate(name: 
"\(name).formsInput[]") @@ -1796,11 +2579,13 @@ extension DataZone { } try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 20) try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1) - try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") - try self.validate(self.name, name: "name", parent: name, max: 256) + try self.items?.forEach { + try $0.validate(name: "\(name).items[]") + } + try self.validate(self.items, name: "items", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 64) try self.validate(self.name, name: "name", parent: name, min: 1) - try self.validate(self.typeRevision, name: "typeRevision", parent: name, max: 64) - try self.validate(self.typeRevision, name: "typeRevision", parent: name, min: 1) + try self.validate(self.owningProjectIdentifier, name: "owningProjectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") } private enum CodingKeys: String, CodingKey { @@ -1808,72 +2593,57 @@ extension DataZone { case description = "description" case formsInput = "formsInput" case glossaryTerms = "glossaryTerms" + case items = "items" case name = "name" - case predictionConfiguration = "predictionConfiguration" - case typeRevision = "typeRevision" + case owningProjectIdentifier = "owningProjectIdentifier" } } - public struct CreateAssetRevisionOutput: AWSDecodableShape { - /// The timestamp of when the asset revision occured. + public struct CreateDataProductOutput: AWSDecodableShape { + /// The timestamp at which the data product was created. public let createdAt: Date? - /// The Amazon DataZone user who performed the asset revision. + /// The user who created the data product. public let createdBy: String? - /// The revised asset description. + /// The description of the data product. public let description: String? - /// The unique identifier of the Amazon DataZone domain where the asset was revised. 
+ /// The ID of the domain where the data product lives. public let domainId: String - /// The external identifier of the asset. - public let externalIdentifier: String? - /// The timestamp of when the first asset revision occured. + /// The timestamp at which the first revision of the data product was created. public let firstRevisionCreatedAt: Date? - /// The Amazon DataZone user who performed the first asset revision. + /// The user who created the first revision of the data product. public let firstRevisionCreatedBy: String? - /// The metadata forms that were attached to the asset as part of the asset revision. - public let formsOutput: [FormOutput] - /// The glossary terms that were attached to the asset as part of asset revision. + /// The metadata forms of the data product. + public let formsOutput: [FormOutput]? + /// The glossary terms of the data product. public let glossaryTerms: [String]? - /// The unique identifier of the asset revision. + /// The ID of the data product. public let id: String - /// The latest data point that was imported into the time series form for the asset. - public let latestTimeSeriesDataPointFormsOutput: [TimeSeriesDataPointSummaryFormOutput]? - /// The details of an asset published in an Amazon DataZone catalog. - public let listing: AssetListingDetails? - /// The revised name of the asset. + /// The data assets of the data product. + public let items: [DataProductItem]? + /// The name of the data product. public let name: String - /// The unique identifier of the revised project that owns the asset. + /// The ID of the owning project of the data product. public let owningProjectId: String - /// The configuration of the automatically generated business-friendly metadata for the asset. - public let predictionConfiguration: PredictionConfiguration? - /// The read-only metadata forms that were attached to the asset as part of the asset revision. - public let readOnlyFormsOutput: [FormOutput]? - /// The revision of the asset. 
+ /// The revision of the data product. public let revision: String - /// The identifier of the revision type. - public let typeIdentifier: String - /// The revision type of the asset. - public let typeRevision: String + /// The status of the data product. + public let status: DataProductStatus - public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, externalIdentifier: String? = nil, firstRevisionCreatedAt: Date? = nil, firstRevisionCreatedBy: String? = nil, formsOutput: [FormOutput], glossaryTerms: [String]? = nil, id: String, latestTimeSeriesDataPointFormsOutput: [TimeSeriesDataPointSummaryFormOutput]? = nil, listing: AssetListingDetails? = nil, name: String, owningProjectId: String, predictionConfiguration: PredictionConfiguration? = nil, readOnlyFormsOutput: [FormOutput]? = nil, revision: String, typeIdentifier: String, typeRevision: String) { + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, firstRevisionCreatedAt: Date? = nil, firstRevisionCreatedBy: String? = nil, formsOutput: [FormOutput]? = nil, glossaryTerms: [String]? = nil, id: String, items: [DataProductItem]? 
= nil, name: String, owningProjectId: String, revision: String, status: DataProductStatus) { self.createdAt = createdAt self.createdBy = createdBy self.description = description self.domainId = domainId - self.externalIdentifier = externalIdentifier self.firstRevisionCreatedAt = firstRevisionCreatedAt self.firstRevisionCreatedBy = firstRevisionCreatedBy self.formsOutput = formsOutput self.glossaryTerms = glossaryTerms self.id = id - self.latestTimeSeriesDataPointFormsOutput = latestTimeSeriesDataPointFormsOutput - self.listing = listing + self.items = items self.name = name self.owningProjectId = owningProjectId - self.predictionConfiguration = predictionConfiguration - self.readOnlyFormsOutput = readOnlyFormsOutput self.revision = revision - self.typeIdentifier = typeIdentifier - self.typeRevision = typeRevision + self.status = status } private enum CodingKeys: String, CodingKey { @@ -1881,117 +2651,141 @@ extension DataZone { case createdBy = "createdBy" case description = "description" case domainId = "domainId" - case externalIdentifier = "externalIdentifier" case firstRevisionCreatedAt = "firstRevisionCreatedAt" case firstRevisionCreatedBy = "firstRevisionCreatedBy" case formsOutput = "formsOutput" case glossaryTerms = "glossaryTerms" case id = "id" - case latestTimeSeriesDataPointFormsOutput = "latestTimeSeriesDataPointFormsOutput" - case listing = "listing" + case items = "items" case name = "name" case owningProjectId = "owningProjectId" - case predictionConfiguration = "predictionConfiguration" - case readOnlyFormsOutput = "readOnlyFormsOutput" case revision = "revision" - case typeIdentifier = "typeIdentifier" - case typeRevision = "typeRevision" + case status = "status" } } - public struct CreateAssetTypeInput: AWSEncodableShape { - /// The descripton of the custom asset type. + public struct CreateDataProductRevisionInput: AWSEncodableShape { + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. 
+ public let clientToken: String? + /// The description of the data product revision. public let description: String? - /// The unique identifier of the Amazon DataZone domain where the custom asset type is being created. + /// The ID of the domain where the data product revision is created. public let domainIdentifier: String - /// The metadata forms that are to be attached to the custom asset type. - public let formsInput: [String: FormEntryInput] - /// The name of the custom asset type. + /// The metadata forms of the data product revision. + public let formsInput: [FormInput]? + /// The glossary terms of the data product revision. + public let glossaryTerms: [String]? + /// The ID of the data product revision. + public let identifier: String + /// The data assets of the data product revision. + public let items: [DataProductItem]? + /// The name of the data product revision. public let name: String - /// The identifier of the Amazon DataZone project that is to own the custom asset type. - public let owningProjectIdentifier: String - public init(description: String? = nil, domainIdentifier: String, formsInput: [String: FormEntryInput], name: String, owningProjectIdentifier: String) { + public init(clientToken: String? = CreateDataProductRevisionInput.idempotencyToken(), description: String? = nil, domainIdentifier: String, formsInput: [FormInput]? = nil, glossaryTerms: [String]? = nil, identifier: String, items: [DataProductItem]? = nil, name: String) { + self.clientToken = clientToken self.description = description self.domainIdentifier = domainIdentifier self.formsInput = formsInput + self.glossaryTerms = glossaryTerms + self.identifier = identifier + self.items = items self.name = name - self.owningProjectIdentifier = owningProjectIdentifier } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) try container.encodeIfPresent(self.description, forKey: .description) request.encodePath(self.domainIdentifier, key: "domainIdentifier") - try container.encode(self.formsInput, forKey: .formsInput) + try container.encodeIfPresent(self.formsInput, forKey: .formsInput) + try container.encodeIfPresent(self.glossaryTerms, forKey: .glossaryTerms) + request.encodePath(self.identifier, key: "identifier") + try container.encodeIfPresent(self.items, forKey: .items) try container.encode(self.name, forKey: .name) - try container.encode(self.owningProjectIdentifier, forKey: .owningProjectIdentifier) } public func validate(name: String) throws { - try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") + try self.validate(self.description, name: "description", parent: name, max: 4096) + try self.validate(self.description, name: "description", parent: name, min: 1) try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") - try self.formsInput.forEach { - try validate($0.key, name: "formsInput.key", parent: name, max: 128) - try validate($0.key, name: "formsInput.key", parent: name, min: 1) - try validate($0.key, name: "formsInput.key", parent: name, pattern: "^(?![0-9_])\\w+$|^_\\w*[a-zA-Z0-9]\\w*$") - try $0.value.validate(name: "\(name).formsInput[\"\($0.key)\"]") + try self.formsInput?.forEach { + try $0.validate(name: "\(name).formsInput[]") } try self.validate(self.formsInput, name: "formsInput", parent: name, max: 10) - try self.validate(self.name, name: "name", 
parent: name, max: 256) + try self.glossaryTerms?.forEach { + try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 20) + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1) + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.items?.forEach { + try $0.validate(name: "\(name).items[]") + } + try self.validate(self.items, name: "items", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 64) try self.validate(self.name, name: "name", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^[^\\.]*") - try self.validate(self.owningProjectIdentifier, name: "owningProjectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") } private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" case description = "description" case formsInput = "formsInput" + case glossaryTerms = "glossaryTerms" + case items = "items" case name = "name" - case owningProjectIdentifier = "owningProjectIdentifier" } } - public struct CreateAssetTypeOutput: AWSDecodableShape { - /// The timestamp of when the asset type is to be created. + public struct CreateDataProductRevisionOutput: AWSDecodableShape { + /// The timestamp at which the data product revision is created. public let createdAt: Date? - /// The Amazon DataZone user who creates this custom asset type. + /// The user who created the data product revision. public let createdBy: String? - /// The description of the custom asset type. + /// The description of the data product revision. public let description: String? - /// The ID of the Amazon DataZone domain in which the asset type was created. + /// The ID of the domain where data product revision is created. 
public let domainId: String - /// The metadata forms that are attached to the asset type. - public let formsOutput: [String: FormEntryOutput] - /// The name of the asset type. + /// The timestamp at which the first revision of the data product is created. + public let firstRevisionCreatedAt: Date? + /// The user who created the first revision of the data product. + public let firstRevisionCreatedBy: String? + /// The metadata forms of the data product revision. + public let formsOutput: [FormOutput]? + /// The glossary terms of the data product revision. + public let glossaryTerms: [String]? + /// The ID of the data product revision. + public let id: String + /// The data assets of the data product revision. + public let items: [DataProductItem]? + /// The name of the data product revision. public let name: String - /// The ID of the Amazon DataZone domain where the asset type was originally created. - public let originDomainId: String? - /// The ID of the Amazon DataZone project where the asset type was originally created. - public let originProjectId: String? - /// The ID of the Amazon DataZone project that currently owns this asset type. - public let owningProjectId: String? - /// The revision of the custom asset type. + /// The ID of the owning project of the data product revision. + public let owningProjectId: String + /// The revision of the data product revision. public let revision: String - /// The timestamp of when the custom type was created. - public let updatedAt: Date? - /// The Amazon DataZone user that created the custom asset type. - public let updatedBy: String? + /// The status of the data product revision. + public let status: DataProductStatus - public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, formsOutput: [String: FormEntryOutput], name: String, originDomainId: String? = nil, originProjectId: String? = nil, owningProjectId: String? = nil, revision: String, updatedAt: Date? 
= nil, updatedBy: String? = nil) { + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, firstRevisionCreatedAt: Date? = nil, firstRevisionCreatedBy: String? = nil, formsOutput: [FormOutput]? = nil, glossaryTerms: [String]? = nil, id: String, items: [DataProductItem]? = nil, name: String, owningProjectId: String, revision: String, status: DataProductStatus) { self.createdAt = createdAt self.createdBy = createdBy self.description = description self.domainId = domainId + self.firstRevisionCreatedAt = firstRevisionCreatedAt + self.firstRevisionCreatedBy = firstRevisionCreatedBy self.formsOutput = formsOutput + self.glossaryTerms = glossaryTerms + self.id = id + self.items = items self.name = name - self.originDomainId = originDomainId - self.originProjectId = originProjectId self.owningProjectId = owningProjectId self.revision = revision - self.updatedAt = updatedAt - self.updatedBy = updatedBy + self.status = status } private enum CodingKeys: String, CodingKey { @@ -1999,14 +2793,16 @@ extension DataZone { case createdBy = "createdBy" case description = "description" case domainId = "domainId" + case firstRevisionCreatedAt = "firstRevisionCreatedAt" + case firstRevisionCreatedBy = "firstRevisionCreatedBy" case formsOutput = "formsOutput" + case glossaryTerms = "glossaryTerms" + case id = "id" + case items = "items" case name = "name" - case originDomainId = "originDomainId" - case originProjectId = "originProjectId" case owningProjectId = "owningProjectId" case revision = "revision" - case updatedAt = "updatedAt" - case updatedBy = "updatedBy" + case status = "status" } } @@ -3287,6 +4083,21 @@ extension DataZone { /// The Amazon DataZone user who updated the subscription grant. public let updatedBy: String? + public init(assets: [SubscribedAsset]? 
= nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? = nil) { + self.assets = assets + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.grantedEntity = grantedEntity + self.id = id + self.status = status + self.subscriptionId = nil + self.subscriptionTargetId = subscriptionTargetId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + @available(*, deprecated, message: "Members subscriptionId have been deprecated") public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionId: String? = nil, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? = nil) { self.assets = assets self.createdAt = createdAt @@ -3677,60 +4488,228 @@ extension DataZone { } } - public struct DataProductItem: AWSDecodableShape { - public let domainId: String? - public let itemId: String? + public struct DataProductItem: AWSEncodableShape & AWSDecodableShape { + /// The glossary terms of the data product. + public let glossaryTerms: [String]? + /// The ID of the data product. + public let identifier: String + /// The type of the data product. + public let itemType: DataProductItemType + /// The revision of the data product. + public let revision: String? + + public init(glossaryTerms: [String]? = nil, identifier: String, itemType: DataProductItemType, revision: String? 
= nil) { + self.glossaryTerms = glossaryTerms + self.identifier = identifier + self.itemType = itemType + self.revision = revision + } + + public func validate(name: String) throws { + try self.glossaryTerms?.forEach { + try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 2) + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1) + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.revision, name: "revision", parent: name, max: 64) + try self.validate(self.revision, name: "revision", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case glossaryTerms = "glossaryTerms" + case identifier = "identifier" + case itemType = "itemType" + case revision = "revision" + } + } + + public struct DataProductListing: AWSDecodableShape { + /// The timestamp at which the data product listing was created. + public let createdAt: Date? + /// The ID of the data product listing. + public let dataProductId: String? + /// The revision of the data product listing. + public let dataProductRevision: String? + /// The metadata forms of the data product listing. + public let forms: String? + /// The glossary terms of the data product listing. + public let glossaryTerms: [DetailedGlossaryTerm]? + /// The data assets of the data product listing. + public let items: [ListingSummary]? + /// The ID of the owning project of the data product listing. + public let owningProjectId: String? + + public init(createdAt: Date? = nil, dataProductId: String? = nil, dataProductRevision: String? = nil, forms: String? = nil, glossaryTerms: [DetailedGlossaryTerm]? = nil, items: [ListingSummary]? = nil, owningProjectId: String? 
= nil) { + self.createdAt = createdAt + self.dataProductId = dataProductId + self.dataProductRevision = dataProductRevision + self.forms = forms + self.glossaryTerms = glossaryTerms + self.items = items + self.owningProjectId = owningProjectId + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case dataProductId = "dataProductId" + case dataProductRevision = "dataProductRevision" + case forms = "forms" + case glossaryTerms = "glossaryTerms" + case items = "items" + case owningProjectId = "owningProjectId" + } + } + + public struct DataProductListingItem: AWSDecodableShape { + /// The additional attributes of the asset of the data product. + public let additionalAttributes: DataProductListingItemAdditionalAttributes? + /// The timestamp at which the asset of the data product listing was created. + public let createdAt: Date? + /// The description of the asset of the asset of the data product. + public let description: String? + /// The entity ID of the asset of the asset of the data product. + public let entityId: String? + /// The revision of the asset of the asset of the data product. + public let entityRevision: String? + /// The glossary terms of the asset of the asset of the data product. + public let glossaryTerms: [DetailedGlossaryTerm]? + /// The data of the asset of the data product. + public let items: [ListingSummaryItem]? + /// The timestamp at which the listing was created. + public let listingCreatedBy: String? + /// The ID of the listing. + public let listingId: String? + /// The revision of the listing. + public let listingRevision: String? + /// The user who updated the listing. + public let listingUpdatedBy: String? + /// The name of the asset of the data product. + public let name: String? + /// The ID of the owning project of the asset of the data product. + public let owningProjectId: String? + + public init(additionalAttributes: DataProductListingItemAdditionalAttributes? = nil, createdAt: Date? 
= nil, description: String? = nil, entityId: String? = nil, entityRevision: String? = nil, glossaryTerms: [DetailedGlossaryTerm]? = nil, items: [ListingSummaryItem]? = nil, listingCreatedBy: String? = nil, listingId: String? = nil, listingRevision: String? = nil, listingUpdatedBy: String? = nil, name: String? = nil, owningProjectId: String? = nil) { + self.additionalAttributes = additionalAttributes + self.createdAt = createdAt + self.description = description + self.entityId = entityId + self.entityRevision = entityRevision + self.glossaryTerms = glossaryTerms + self.items = items + self.listingCreatedBy = listingCreatedBy + self.listingId = listingId + self.listingRevision = listingRevision + self.listingUpdatedBy = listingUpdatedBy + self.name = name + self.owningProjectId = owningProjectId + } + + private enum CodingKeys: String, CodingKey { + case additionalAttributes = "additionalAttributes" + case createdAt = "createdAt" + case description = "description" + case entityId = "entityId" + case entityRevision = "entityRevision" + case glossaryTerms = "glossaryTerms" + case items = "items" + case listingCreatedBy = "listingCreatedBy" + case listingId = "listingId" + case listingRevision = "listingRevision" + case listingUpdatedBy = "listingUpdatedBy" + case name = "name" + case owningProjectId = "owningProjectId" + } + } + + public struct DataProductListingItemAdditionalAttributes: AWSDecodableShape { + /// The metadata forms of the asset of the data product. + public let forms: String? - public init(domainId: String? = nil, itemId: String? = nil) { - self.domainId = domainId - self.itemId = itemId + public init(forms: String? = nil) { + self.forms = forms } private enum CodingKeys: String, CodingKey { - case domainId = "domainId" - case itemId = "itemId" + case forms = "forms" } } - public struct DataProductSummary: AWSDecodableShape { + public struct DataProductResultItem: AWSDecodableShape { + /// The timestamp at which the data product was created. 
public let createdAt: Date? + /// The user who created the data product. public let createdBy: String? - public let dataProductItems: [DataProductItem]? + /// The description of the data product. public let description: String? + /// The ID of the domain where the data product lives. public let domainId: String + /// The timestamp at which first revision of the data product was created. + public let firstRevisionCreatedAt: Date? + /// The user who created the first revision of the data product. + public let firstRevisionCreatedBy: String? + /// The glossary terms of the data product. public let glossaryTerms: [String]? + /// The ID of the data product. public let id: String + /// The name of the data product. public let name: String + /// The ID of the owning project of the data product. public let owningProjectId: String - public let updatedAt: Date? - public let updatedBy: String? - public init(createdAt: Date? = nil, createdBy: String? = nil, dataProductItems: [DataProductItem]? = nil, description: String? = nil, domainId: String, glossaryTerms: [String]? = nil, id: String, name: String, owningProjectId: String, updatedAt: Date? = nil, updatedBy: String? = nil) { + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, firstRevisionCreatedAt: Date? = nil, firstRevisionCreatedBy: String? = nil, glossaryTerms: [String]? 
= nil, id: String, name: String, owningProjectId: String) { self.createdAt = createdAt self.createdBy = createdBy - self.dataProductItems = dataProductItems self.description = description self.domainId = domainId + self.firstRevisionCreatedAt = firstRevisionCreatedAt + self.firstRevisionCreatedBy = firstRevisionCreatedBy self.glossaryTerms = glossaryTerms self.id = id self.name = name self.owningProjectId = owningProjectId - self.updatedAt = updatedAt - self.updatedBy = updatedBy } private enum CodingKeys: String, CodingKey { case createdAt = "createdAt" case createdBy = "createdBy" - case dataProductItems = "dataProductItems" case description = "description" case domainId = "domainId" + case firstRevisionCreatedAt = "firstRevisionCreatedAt" + case firstRevisionCreatedBy = "firstRevisionCreatedBy" case glossaryTerms = "glossaryTerms" case id = "id" case name = "name" case owningProjectId = "owningProjectId" - case updatedAt = "updatedAt" - case updatedBy = "updatedBy" + } + } + + public struct DataProductRevision: AWSDecodableShape { + /// The timestamp at which the data product revision was created. + public let createdAt: Date? + /// The user who created the data product revision. + public let createdBy: String? + /// The ID of the domain where the data product revision lives. + public let domainId: String? + /// The ID of the data product revision. + public let id: String? + /// The data product revision. + public let revision: String? + + public init(createdAt: Date? = nil, createdBy: String? = nil, domainId: String? = nil, id: String? = nil, revision: String? 
= nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.id = id + self.revision = revision + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case id = "id" + case revision = "revision" } } @@ -3922,6 +4901,37 @@ extension DataZone { } } + public struct DeleteAssetFilterInput: AWSEncodableShape { + /// The ID of the data asset. + public let assetIdentifier: String + /// The ID of the domain where you want to delete an asset filter. + public let domainIdentifier: String + /// The ID of the asset filter that you want to delete. + public let identifier: String + + public init(assetIdentifier: String, domainIdentifier: String, identifier: String) { + self.assetIdentifier = assetIdentifier + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.assetIdentifier, key: "assetIdentifier") + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.identifier, key: "identifier") + } + + public func validate(name: String) throws { + try self.validate(self.assetIdentifier, name: "assetIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + public struct DeleteAssetInput: AWSEncodableShape { /// The ID of the Amazon DataZone domain in which the asset is deleted. 
public let domainIdentifier: String @@ -3984,6 +4994,36 @@ extension DataZone { public init() {} } + public struct DeleteDataProductInput: AWSEncodableShape { + /// The ID of the Amazon DataZone domain in which the data product is deleted. + public let domainIdentifier: String + /// The identifier of the data product that is deleted. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.identifier, key: "identifier") + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteDataProductOutput: AWSDecodableShape { + public init() {} + } + public struct DeleteDataSourceInput: AWSEncodableShape { /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. public let clientToken: String? @@ -4510,6 +5550,21 @@ extension DataZone { /// The Amazon DataZone user who updated the subscription grant that is deleted. public let updatedBy: String? + public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? 
= nil) { + self.assets = assets + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.grantedEntity = grantedEntity + self.id = id + self.status = status + self.subscriptionId = nil + self.subscriptionTargetId = subscriptionTargetId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + @available(*, deprecated, message: "Members subscriptionId have been deprecated") public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionId: String? = nil, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? = nil) { self.assets = assets self.createdAt = createdAt @@ -4833,6 +5888,8 @@ extension DataZone { public let environmentBlueprintId: String /// The ARN of the manage access role specified in the environment blueprint configuration. public let manageAccessRoleArn: String? + /// The provisioning configuration of a blueprint. + public let provisioningConfigurations: [ProvisioningConfiguration]? /// The ARN of the provisioning role specified in the environment blueprint configuration. public let provisioningRoleArn: String? /// The regional parameters of the environment blueprint. @@ -4840,12 +5897,13 @@ extension DataZone { /// The timestamp of when the environment blueprint was updated. public let updatedAt: Date? - public init(createdAt: Date? = nil, domainId: String, enabledRegions: [String]? = nil, environmentBlueprintId: String, manageAccessRoleArn: String? = nil, provisioningRoleArn: String? = nil, regionalParameters: [String: [String: String]]? = nil, updatedAt: Date? = nil) { + public init(createdAt: Date? = nil, domainId: String, enabledRegions: [String]? = nil, environmentBlueprintId: String, manageAccessRoleArn: String? = nil, provisioningConfigurations: [ProvisioningConfiguration]? = nil, provisioningRoleArn: String? 
= nil, regionalParameters: [String: [String: String]]? = nil, updatedAt: Date? = nil) { self.createdAt = createdAt self.domainId = domainId self.enabledRegions = enabledRegions self.environmentBlueprintId = environmentBlueprintId self.manageAccessRoleArn = manageAccessRoleArn + self.provisioningConfigurations = provisioningConfigurations self.provisioningRoleArn = provisioningRoleArn self.regionalParameters = regionalParameters self.updatedAt = updatedAt @@ -4857,6 +5915,7 @@ extension DataZone { case enabledRegions = "enabledRegions" case environmentBlueprintId = "environmentBlueprintId" case manageAccessRoleArn = "manageAccessRoleArn" + case provisioningConfigurations = "provisioningConfigurations" case provisioningRoleArn = "provisioningRoleArn" case regionalParameters = "regionalParameters" case updatedAt = "updatedAt" @@ -5048,6 +6107,23 @@ extension DataZone { } } + public struct EqualToExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + /// The value that might be equal to an expression. + public let value: String + + public init(columnName: String, value: String) { + self.columnName = columnName + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + case value = "value" + } + } + public struct FailureCause: AWSEncodableShape & AWSDecodableShape { /// The description of the error message. public let message: String? @@ -5268,6 +6344,90 @@ extension DataZone { } } + public struct GetAssetFilterInput: AWSEncodableShape { + /// The ID of the data asset. + public let assetIdentifier: String + /// The ID of the domain where you want to get an asset filter. + public let domainIdentifier: String + /// The ID of the asset filter. 
+ public let identifier: String + + public init(assetIdentifier: String, domainIdentifier: String, identifier: String) { + self.assetIdentifier = assetIdentifier + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.assetIdentifier, key: "assetIdentifier") + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.identifier, key: "identifier") + } + + public func validate(name: String) throws { + try self.validate(self.assetIdentifier, name: "assetIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetAssetFilterOutput: AWSDecodableShape { + /// The ID of the data asset. + public let assetId: String + /// The configuration of the asset filter. + public let configuration: AssetFilterConfiguration + /// The timestamp at which the asset filter was created. + public let createdAt: Date? + /// The description of the asset filter. + public let description: String? + /// The ID of the domain where you want to get an asset filter. + public let domainId: String + /// The column names of the asset filter. + public let effectiveColumnNames: [String]? + /// The row filter of the asset filter. + public let effectiveRowFilter: String? + /// The error message that is displayed if the action does not complete successfully. + public let errorMessage: String? + /// The ID of the asset filter. + public let id: String + /// The name of the asset filter. + public let name: String + /// The status of the asset filter. 
+ public let status: FilterStatus? + + public init(assetId: String, configuration: AssetFilterConfiguration, createdAt: Date? = nil, description: String? = nil, domainId: String, effectiveColumnNames: [String]? = nil, effectiveRowFilter: String? = nil, errorMessage: String? = nil, id: String, name: String, status: FilterStatus? = nil) { + self.assetId = assetId + self.configuration = configuration + self.createdAt = createdAt + self.description = description + self.domainId = domainId + self.effectiveColumnNames = effectiveColumnNames + self.effectiveRowFilter = effectiveRowFilter + self.errorMessage = errorMessage + self.id = id + self.name = name + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case assetId = "assetId" + case configuration = "configuration" + case createdAt = "createdAt" + case description = "description" + case domainId = "domainId" + case effectiveColumnNames = "effectiveColumnNames" + case effectiveRowFilter = "effectiveRowFilter" + case errorMessage = "errorMessage" + case id = "id" + case name = "name" + case status = "status" + } + } + public struct GetAssetInput: AWSEncodableShape { /// The ID of the Amazon DataZone domain to which the asset belongs. 
public let domainIdentifier: String @@ -5448,12 +6608,107 @@ extension DataZone { self.domainId = domainId self.formsOutput = formsOutput self.name = name - self.originDomainId = originDomainId - self.originProjectId = originProjectId + self.originDomainId = originDomainId + self.originProjectId = originProjectId + self.owningProjectId = owningProjectId + self.revision = revision + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case formsOutput = "formsOutput" + case name = "name" + case originDomainId = "originDomainId" + case originProjectId = "originProjectId" + case owningProjectId = "owningProjectId" + case revision = "revision" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct GetDataProductInput: AWSEncodableShape { + /// The ID of the domain where the data product lives. + public let domainIdentifier: String + /// The ID of the data product. + public let identifier: String + /// The revision of the data product. + public let revision: String? + + public init(domainIdentifier: String, identifier: String, revision: String? = nil) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.revision = revision + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.identifier, key: "identifier") + request.encodeQuery(self.revision, key: "revision") + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.revision, name: "revision", parent: name, max: 64) + try self.validate(self.revision, name: "revision", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetDataProductOutput: AWSDecodableShape { + /// The timestamp at which the data product is created. + public let createdAt: Date? + /// The user who created the data product. + public let createdBy: String? + /// The description of the data product. + public let description: String? + /// The ID of the domain where the data product lives. + public let domainId: String + /// The timestamp at which the first revision of the data product is created. + public let firstRevisionCreatedAt: Date? + /// The user who created the first revision of the data product. + public let firstRevisionCreatedBy: String? + /// The metadata forms of the data product. + public let formsOutput: [FormOutput]? + /// The glossary terms of the data product. + public let glossaryTerms: [String]? + /// The ID of the data product. + public let id: String + /// The data assets of the data product. + public let items: [DataProductItem]? + /// The name of the data product. + public let name: String + /// The ID of the owning project of the data product. + public let owningProjectId: String + /// The revision of the data product. + public let revision: String + /// The status of the data product. 
+ public let status: DataProductStatus + + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, firstRevisionCreatedAt: Date? = nil, firstRevisionCreatedBy: String? = nil, formsOutput: [FormOutput]? = nil, glossaryTerms: [String]? = nil, id: String, items: [DataProductItem]? = nil, name: String, owningProjectId: String, revision: String, status: DataProductStatus) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.firstRevisionCreatedAt = firstRevisionCreatedAt + self.firstRevisionCreatedBy = firstRevisionCreatedBy + self.formsOutput = formsOutput + self.glossaryTerms = glossaryTerms + self.id = id + self.items = items + self.name = name self.owningProjectId = owningProjectId self.revision = revision - self.updatedAt = updatedAt - self.updatedBy = updatedBy + self.status = status } private enum CodingKeys: String, CodingKey { @@ -5461,14 +6716,16 @@ extension DataZone { case createdBy = "createdBy" case description = "description" case domainId = "domainId" + case firstRevisionCreatedAt = "firstRevisionCreatedAt" + case firstRevisionCreatedBy = "firstRevisionCreatedBy" case formsOutput = "formsOutput" + case glossaryTerms = "glossaryTerms" + case id = "id" + case items = "items" case name = "name" - case originDomainId = "originDomainId" - case originProjectId = "originProjectId" case owningProjectId = "owningProjectId" case revision = "revision" - case updatedAt = "updatedAt" - case updatedBy = "updatedBy" + case status = "status" } } @@ -5867,6 +7124,8 @@ extension DataZone { public let environmentBlueprintId: String /// The ARN of the manage access role with which this blueprint is created. public let manageAccessRoleArn: String? + /// The provisioning configuration of a blueprint. + public let provisioningConfigurations: [ProvisioningConfiguration]? /// The ARN of the provisioning role with which this blueprint is created. 
public let provisioningRoleArn: String? /// The regional parameters of the blueprint. @@ -5874,12 +7133,13 @@ extension DataZone { /// The timestamp of when this blueprint was upated. public let updatedAt: Date? - public init(createdAt: Date? = nil, domainId: String, enabledRegions: [String]? = nil, environmentBlueprintId: String, manageAccessRoleArn: String? = nil, provisioningRoleArn: String? = nil, regionalParameters: [String: [String: String]]? = nil, updatedAt: Date? = nil) { + public init(createdAt: Date? = nil, domainId: String, enabledRegions: [String]? = nil, environmentBlueprintId: String, manageAccessRoleArn: String? = nil, provisioningConfigurations: [ProvisioningConfiguration]? = nil, provisioningRoleArn: String? = nil, regionalParameters: [String: [String: String]]? = nil, updatedAt: Date? = nil) { self.createdAt = createdAt self.domainId = domainId self.enabledRegions = enabledRegions self.environmentBlueprintId = environmentBlueprintId self.manageAccessRoleArn = manageAccessRoleArn + self.provisioningConfigurations = provisioningConfigurations self.provisioningRoleArn = provisioningRoleArn self.regionalParameters = regionalParameters self.updatedAt = updatedAt @@ -5891,6 +7151,7 @@ extension DataZone { case enabledRegions = "enabledRegions" case environmentBlueprintId = "environmentBlueprintId" case manageAccessRoleArn = "manageAccessRoleArn" + case provisioningConfigurations = "provisioningConfigurations" case provisioningRoleArn = "provisioningRoleArn" case regionalParameters = "regionalParameters" case updatedAt = "updatedAt" @@ -5972,6 +7233,57 @@ extension DataZone { } } + public struct GetEnvironmentCredentialsInput: AWSEncodableShape { + /// The ID of the Amazon DataZone domain in which this environment and its credentials exist. + public let domainIdentifier: String + /// The ID of the environment whose credentials this operation gets. 
+ public let environmentIdentifier: String + + public init(domainIdentifier: String, environmentIdentifier: String) { + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.environmentIdentifier, key: "environmentIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetEnvironmentCredentialsOutput: AWSDecodableShape { + /// The access key ID of the environment. + public let accessKeyId: String? + /// The expiration timestamp of the environment credentials. + public let expiration: Date? + /// The secret access key of the environment credentials. + public let secretAccessKey: String? + /// The session token of the environment credentials. + public let sessionToken: String? + + public init(accessKeyId: String? = nil, expiration: Date? = nil, secretAccessKey: String? = nil, sessionToken: String? = nil) { + self.accessKeyId = accessKeyId + self.expiration = expiration + self.secretAccessKey = secretAccessKey + self.sessionToken = sessionToken + } + + private enum CodingKeys: String, CodingKey { + case accessKeyId = "accessKeyId" + case expiration = "expiration" + case secretAccessKey = "secretAccessKey" + case sessionToken = "sessionToken" + } + } + public struct GetEnvironmentInput: AWSEncodableShape { /// The ID of the Amazon DataZone domain where the environment exists. 
public let domainIdentifier: String @@ -6889,6 +8201,21 @@ extension DataZone { /// The Amazon DataZone user who updated the subscription grant. public let updatedBy: String? + public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? = nil) { + self.assets = assets + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.grantedEntity = grantedEntity + self.id = id + self.status = status + self.subscriptionId = nil + self.subscriptionTargetId = subscriptionTargetId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + @available(*, deprecated, message: "Members subscriptionId have been deprecated") public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionId: String? = nil, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? = nil) { self.assets = assets self.createdAt = createdAt @@ -7477,6 +8804,40 @@ extension DataZone { } } + public struct GreaterThanExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + /// The value that might be greater than an expression. + public let value: String + + public init(columnName: String, value: String) { + self.columnName = columnName + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + case value = "value" + } + } + + public struct GreaterThanOrEqualToExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + /// The value that might be greater than or equal to an expression. 
+ public let value: String + + public init(columnName: String, value: String) { + self.columnName = columnName + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + case value = "value" + } + } + public struct GroupDetails: AWSDecodableShape { /// The identifier of the group in Amazon DataZone. public let groupId: String @@ -7545,6 +8906,127 @@ extension DataZone { } } + public struct InExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + /// The values that might be in the expression. + public let values: [String] + + public init(columnName: String, values: [String]) { + self.columnName = columnName + self.values = values + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + case values = "values" + } + } + + public struct IsNotNullExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + + public init(columnName: String) { + self.columnName = columnName + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + } + } + + public struct IsNullExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + + public init(columnName: String) { + self.columnName = columnName + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + } + } + + public struct LakeFormationConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Specifies certain Amazon S3 locations if you do not want Amazon DataZone to automatically register them in hybrid mode. + public let locationRegistrationExcludeS3Locations: [String]? + /// The role that is used to manage read/write access to the chosen Amazon S3 bucket(s) for Data Lake using AWS Lake Formation hybrid access mode. + public let locationRegistrationRole: String? 
+ + public init(locationRegistrationExcludeS3Locations: [String]? = nil, locationRegistrationRole: String? = nil) { + self.locationRegistrationExcludeS3Locations = locationRegistrationExcludeS3Locations + self.locationRegistrationRole = locationRegistrationRole + } + + public func validate(name: String) throws { + try self.locationRegistrationExcludeS3Locations?.forEach { + try validate($0, name: "locationRegistrationExcludeS3Locations[]", parent: name, max: 1024) + try validate($0, name: "locationRegistrationExcludeS3Locations[]", parent: name, min: 1) + try validate($0, name: "locationRegistrationExcludeS3Locations[]", parent: name, pattern: "^s3://.+$") + } + try self.validate(self.locationRegistrationExcludeS3Locations, name: "locationRegistrationExcludeS3Locations", parent: name, max: 20) + try self.validate(self.locationRegistrationRole, name: "locationRegistrationRole", parent: name, pattern: "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$") + } + + private enum CodingKeys: String, CodingKey { + case locationRegistrationExcludeS3Locations = "locationRegistrationExcludeS3Locations" + case locationRegistrationRole = "locationRegistrationRole" + } + } + + public struct LessThanExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + /// The value that might be less than the expression. + public let value: String + + public init(columnName: String, value: String) { + self.columnName = columnName + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + case value = "value" + } + } + + public struct LessThanOrEqualToExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + /// The value that might be less than or equal to an expression. 
+ public let value: String + + public init(columnName: String, value: String) { + self.columnName = columnName + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + case value = "value" + } + } + + public struct LikeExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + /// The value that might be like the expression. + public let value: String + + public init(columnName: String, value: String) { + self.columnName = columnName + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + case value = "value" + } + } + public struct LineageNodeReference: AWSDecodableShape { /// The event timestamp of the data lineage node. public let eventTimestamp: Date? @@ -7639,39 +9121,153 @@ extension DataZone { /// The user who updated the data lineage node type. public let updatedBy: String? - public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, formsOutput: [String: FormEntryOutput], name: String? = nil, revision: String, updatedAt: Date? = nil, updatedBy: String? = nil) { - self.createdAt = createdAt - self.createdBy = createdBy - self.description = description - self.domainId = domainId - self.formsOutput = formsOutput - self.name = name - self.revision = revision - self.updatedAt = updatedAt - self.updatedBy = updatedBy + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, formsOutput: [String: FormEntryOutput], name: String? = nil, revision: String, updatedAt: Date? = nil, updatedBy: String? 
= nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.formsOutput = formsOutput + self.name = name + self.revision = revision + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case formsOutput = "formsOutput" + case name = "name" + case revision = "revision" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct ListAssetFiltersInput: AWSEncodableShape { + /// The ID of the data asset. + public let assetIdentifier: String + /// The ID of the domain where you want to list asset filters. + public let domainIdentifier: String + /// The maximum number of asset filters to return in a single call to ListAssetFilters. When the number of asset filters to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListAssetFilters to list the next set of asset filters. + public let maxResults: Int? + /// When the number of asset filters is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of asset filters, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListAssetFilters to list the next set of asset filters. + public let nextToken: String? + /// The status of the asset filter. + public let status: FilterStatus? + + public init(assetIdentifier: String, domainIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil, status: FilterStatus? 
= nil) { + self.assetIdentifier = assetIdentifier + self.domainIdentifier = domainIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + self.status = status + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.assetIdentifier, key: "assetIdentifier") + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.status, key: "status") + } + + public func validate(name: String) throws { + try self.validate(self.assetIdentifier, name: "assetIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListAssetFiltersOutput: AWSDecodableShape { + /// The results of the ListAssetFilters action. + public let items: [AssetFilterSummary] + /// When the number of asset filters is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of asset filters, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListAssetFilters to list the next set of asset filters. + public let nextToken: String? + + public init(items: [AssetFilterSummary], nextToken: String? 
= nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListAssetRevisionsInput: AWSEncodableShape { + /// The identifier of the domain. + public let domainIdentifier: String + /// The identifier of the asset. + public let identifier: String + /// The maximum number of revisions to return in a single call to ListAssetRevisions. When the number of revisions to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListAssetRevisions to list the next set of revisions. + public let maxResults: Int? + /// When the number of revisions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of revisions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListAssetRevisions to list the next set of revisions. + public let nextToken: String? + + public init(domainIdentifier: String, identifier: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.identifier, key: "identifier") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListAssetRevisionsOutput: AWSDecodableShape { + /// The results of the ListAssetRevisions action. + public let items: [AssetRevision]? + /// When the number of revisions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of revisions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListAssetRevisions to list the next set of revisions. + public let nextToken: String? + + public init(items: [AssetRevision]? = nil, nextToken: String? 
= nil) { + self.items = items + self.nextToken = nextToken } private enum CodingKeys: String, CodingKey { - case createdAt = "createdAt" - case createdBy = "createdBy" - case description = "description" - case domainId = "domainId" - case formsOutput = "formsOutput" - case name = "name" - case revision = "revision" - case updatedAt = "updatedAt" - case updatedBy = "updatedBy" + case items = "items" + case nextToken = "nextToken" } } - public struct ListAssetRevisionsInput: AWSEncodableShape { - /// The identifier of the domain. + public struct ListDataProductRevisionsInput: AWSEncodableShape { + /// The ID of the domain of the data product revisions that you want to list. public let domainIdentifier: String - /// The identifier of the asset. + /// The ID of the data product revision. public let identifier: String - /// The maximum number of revisions to return in a single call to ListAssetRevisions. When the number of revisions to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListAssetRevisions to list the next set of revisions. + /// The maximum number of asset filters to return in a single call to ListDataProductRevisions. When the number of data product revisions to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListDataProductRevisions to list the next set of data product revisions. public let maxResults: Int? - /// When the number of revisions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of revisions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListAssetRevisions to list the next set of revisions. 
+ /// When the number of data product revisions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of data product revisions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDataProductRevisions to list the next set of data product revisions. public let nextToken: String? public init(domainIdentifier: String, identifier: String, maxResults: Int? = nil, nextToken: String? = nil) { @@ -7702,13 +9298,13 @@ extension DataZone { private enum CodingKeys: CodingKey {} } - public struct ListAssetRevisionsOutput: AWSDecodableShape { - /// The results of the ListAssetRevisions action. - public let items: [AssetRevision]? - /// When the number of revisions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of revisions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListAssetRevisions to list the next set of revisions. + public struct ListDataProductRevisionsOutput: AWSDecodableShape { + /// The results of the ListDataProductRevisions action. + public let items: [DataProductRevision] + /// When the number of data product revisions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of data product revisions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDataProductRevisions to list the next set of data product revisions. public let nextToken: String? - public init(items: [AssetRevision]? = nil, nextToken: String? = nil) { + public init(items: [DataProductRevision], nextToken: String? 
= nil) { self.items = items self.nextToken = nextToken } @@ -8626,6 +10222,8 @@ extension DataZone { public let maxResults: Int? /// When the number of subscription grants is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of subscription grants, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListSubscriptionGrants to list the next set of subscription grants. public let nextToken: String? + /// The ID of the owning project of the subscription grants. + public let owningProjectId: String? /// Specifies the way of sorting the results of this action. public let sortBy: SortKey? /// Specifies the sort order of this action. @@ -8637,11 +10235,12 @@ extension DataZone { /// The identifier of the subscription target. public let subscriptionTargetId: String? - public init(domainIdentifier: String, environmentId: String? = nil, maxResults: Int? = nil, nextToken: String? = nil, sortBy: SortKey? = nil, sortOrder: SortOrder? = nil, subscribedListingId: String? = nil, subscriptionId: String? = nil, subscriptionTargetId: String? = nil) { + public init(domainIdentifier: String, environmentId: String? = nil, maxResults: Int? = nil, nextToken: String? = nil, owningProjectId: String? = nil, sortBy: SortKey? = nil, sortOrder: SortOrder? = nil, subscribedListingId: String? = nil, subscriptionId: String? = nil, subscriptionTargetId: String? 
= nil) { self.domainIdentifier = domainIdentifier self.environmentId = environmentId self.maxResults = maxResults self.nextToken = nextToken + self.owningProjectId = owningProjectId self.sortBy = sortBy self.sortOrder = sortOrder self.subscribedListingId = subscribedListingId @@ -8656,6 +10255,7 @@ extension DataZone { request.encodeQuery(self.environmentId, key: "environmentId") request.encodeQuery(self.maxResults, key: "maxResults") request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.owningProjectId, key: "owningProjectId") request.encodeQuery(self.sortBy, key: "sortBy") request.encodeQuery(self.sortOrder, key: "sortOrder") request.encodeQuery(self.subscribedListingId, key: "subscribedListingId") @@ -8670,6 +10270,7 @@ extension DataZone { try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.owningProjectId, name: "owningProjectId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") try self.validate(self.subscribedListingId, name: "subscribedListingId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") try self.validate(self.subscriptionId, name: "subscriptionId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") try self.validate(self.subscriptionTargetId, name: "subscriptionTargetId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") @@ -8710,7 +10311,7 @@ extension DataZone { public let sortBy: SortKey? /// Specifies the sort order for the results of this action. public let sortOrder: SortOrder? - /// Specifies the status of the subscription requests. + /// Specifies the status of the subscription requests. This is not a required parameter, but if not specified, by default, Amazon DataZone returns only PENDING subscription requests. public let status: SubscriptionRequestStatus? /// The identifier of the subscribed listing. 
public let subscribedListingId: String? @@ -8850,7 +10451,7 @@ extension DataZone { public let sortBy: SortKey? /// Specifies the sort order for the results of this action. public let sortOrder: SortOrder? - /// The status of the subscriptions that you want to list. + /// The status of the subscriptions that you want to list. This is not a required parameter, but if not provided, by default, Amazon DataZone returns only APPROVED subscriptions. public let status: SubscriptionStatus? /// The identifier of the subscribed listing for the subscriptions that you want to list. public let subscribedListingId: String? @@ -9060,6 +10661,48 @@ extension DataZone { } } + public struct ListingSummary: AWSDecodableShape { + /// The glossary terms of the data product. + public let glossaryTerms: [DetailedGlossaryTerm]? + /// The ID of the data product listing. + public let listingId: String? + /// The revision of the data product listing. + public let listingRevision: String? + + public init(glossaryTerms: [DetailedGlossaryTerm]? = nil, listingId: String? = nil, listingRevision: String? = nil) { + self.glossaryTerms = glossaryTerms + self.listingId = listingId + self.listingRevision = listingRevision + } + + private enum CodingKeys: String, CodingKey { + case glossaryTerms = "glossaryTerms" + case listingId = "listingId" + case listingRevision = "listingRevision" + } + } + + public struct ListingSummaryItem: AWSDecodableShape { + /// The glossary terms of the data product listing. + public let glossaryTerms: [DetailedGlossaryTerm]? + /// The ID of the data product listing. + public let listingId: String? + /// The revision of the data product listing. + public let listingRevision: String? + + public init(glossaryTerms: [DetailedGlossaryTerm]? = nil, listingId: String? = nil, listingRevision: String? 
= nil) { + self.glossaryTerms = glossaryTerms + self.listingId = listingId + self.listingRevision = listingRevision + } + + private enum CodingKeys: String, CodingKey { + case glossaryTerms = "glossaryTerms" + case listingId = "listingId" + case listingRevision = "listingRevision" + } + } + public struct MetadataGenerationRunItem: AWSDecodableShape { /// The timestamp at which the metadata generation run was created. public let createdAt: Date? @@ -9127,6 +10770,57 @@ extension DataZone { } } + public struct NotEqualToExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + /// The value that might not be equal to the expression. + public let value: String + + public init(columnName: String, value: String) { + self.columnName = columnName + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + case value = "value" + } + } + + public struct NotInExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + /// The value that might not be in the expression. + public let values: [String] + + public init(columnName: String, values: [String]) { + self.columnName = columnName + self.values = values + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + case values = "values" + } + } + + public struct NotLikeExpression: AWSEncodableShape & AWSDecodableShape { + /// The name of the column. + public let columnName: String + /// The value that might not be like the expression. + public let value: String + + public init(columnName: String, value: String) { + self.columnName = columnName + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case columnName = "columnName" + case value = "value" + } + } + public struct NotificationOutput: AWSDecodableShape { /// The action link included in the notification. 
public let actionLink: String @@ -9411,16 +11105,19 @@ extension DataZone { public let environmentBlueprintIdentifier: String /// The ARN of the manage access role. public let manageAccessRoleArn: String? + /// The provisioning configuration of a blueprint. + public let provisioningConfigurations: [ProvisioningConfiguration]? /// The ARN of the provisioning role. public let provisioningRoleArn: String? /// The regional parameters in the environment blueprint. public let regionalParameters: [String: [String: String]]? - public init(domainIdentifier: String, enabledRegions: [String], environmentBlueprintIdentifier: String, manageAccessRoleArn: String? = nil, provisioningRoleArn: String? = nil, regionalParameters: [String: [String: String]]? = nil) { + public init(domainIdentifier: String, enabledRegions: [String], environmentBlueprintIdentifier: String, manageAccessRoleArn: String? = nil, provisioningConfigurations: [ProvisioningConfiguration]? = nil, provisioningRoleArn: String? = nil, regionalParameters: [String: [String: String]]? 
= nil) { self.domainIdentifier = domainIdentifier self.enabledRegions = enabledRegions self.environmentBlueprintIdentifier = environmentBlueprintIdentifier self.manageAccessRoleArn = manageAccessRoleArn + self.provisioningConfigurations = provisioningConfigurations self.provisioningRoleArn = provisioningRoleArn self.regionalParameters = regionalParameters } @@ -9432,6 +11129,7 @@ extension DataZone { try container.encode(self.enabledRegions, forKey: .enabledRegions) request.encodePath(self.environmentBlueprintIdentifier, key: "environmentBlueprintIdentifier") try container.encodeIfPresent(self.manageAccessRoleArn, forKey: .manageAccessRoleArn) + try container.encodeIfPresent(self.provisioningConfigurations, forKey: .provisioningConfigurations) try container.encodeIfPresent(self.provisioningRoleArn, forKey: .provisioningRoleArn) try container.encodeIfPresent(self.regionalParameters, forKey: .regionalParameters) } @@ -9445,6 +11143,9 @@ extension DataZone { } try self.validate(self.environmentBlueprintIdentifier, name: "environmentBlueprintIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") try self.validate(self.manageAccessRoleArn, name: "manageAccessRoleArn", parent: name, pattern: "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$") + try self.provisioningConfigurations?.forEach { + try $0.validate(name: "\(name).provisioningConfigurations[]") + } try self.validate(self.provisioningRoleArn, name: "provisioningRoleArn", parent: name, pattern: "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$") try self.regionalParameters?.forEach { try validate($0.key, name: "regionalParameters.key", parent: name, max: 16) @@ -9456,6 +11157,7 @@ extension DataZone { private enum CodingKeys: String, CodingKey { case enabledRegions = "enabledRegions" case manageAccessRoleArn = "manageAccessRoleArn" + case provisioningConfigurations = "provisioningConfigurations" case provisioningRoleArn = "provisioningRoleArn" case regionalParameters = 
"regionalParameters" } @@ -9472,6 +11174,8 @@ extension DataZone { public let environmentBlueprintId: String /// The ARN of the manage access role. public let manageAccessRoleArn: String? + /// The provisioning configuration of a blueprint. + public let provisioningConfigurations: [ProvisioningConfiguration]? /// The ARN of the provisioning role. public let provisioningRoleArn: String? /// The regional parameters in the environment blueprint. @@ -9479,12 +11183,13 @@ extension DataZone { /// The timestamp of when the environment blueprint was updated. public let updatedAt: Date? - public init(createdAt: Date? = nil, domainId: String, enabledRegions: [String]? = nil, environmentBlueprintId: String, manageAccessRoleArn: String? = nil, provisioningRoleArn: String? = nil, regionalParameters: [String: [String: String]]? = nil, updatedAt: Date? = nil) { + public init(createdAt: Date? = nil, domainId: String, enabledRegions: [String]? = nil, environmentBlueprintId: String, manageAccessRoleArn: String? = nil, provisioningConfigurations: [ProvisioningConfiguration]? = nil, provisioningRoleArn: String? = nil, regionalParameters: [String: [String: String]]? = nil, updatedAt: Date? 
= nil) { self.createdAt = createdAt self.domainId = domainId self.enabledRegions = enabledRegions self.environmentBlueprintId = environmentBlueprintId self.manageAccessRoleArn = manageAccessRoleArn + self.provisioningConfigurations = provisioningConfigurations self.provisioningRoleArn = provisioningRoleArn self.regionalParameters = regionalParameters self.updatedAt = updatedAt @@ -9496,6 +11201,7 @@ extension DataZone { case enabledRegions = "enabledRegions" case environmentBlueprintId = "environmentBlueprintId" case manageAccessRoleArn = "manageAccessRoleArn" + case provisioningConfigurations = "provisioningConfigurations" case provisioningRoleArn = "provisioningRoleArn" case regionalParameters = "regionalParameters" case updatedAt = "updatedAt" @@ -9949,6 +11655,23 @@ extension DataZone { } } + public struct RowFilterConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The row filter. + public let rowFilter: RowFilter + /// Specifies whether the row filter is sensitive. + public let sensitive: Bool? + + public init(rowFilter: RowFilter, sensitive: Bool? = nil) { + self.rowFilter = rowFilter + self.sensitive = sensitive + } + + private enum CodingKeys: String, CodingKey { + case rowFilter = "rowFilter" + case sensitive = "sensitive" + } + } + public struct RunStatisticsForAssets: AWSDecodableShape { /// The added statistic for the data source run. public let added: Int? @@ -10820,6 +12543,39 @@ extension DataZone { } } + public struct SubscribedProductListing: AWSDecodableShape { + /// The data assets of the data product listing. + public let assetListings: [AssetInDataProductListingItem]? + /// The description of the data product listing. + public let description: String? + /// The ID of the data product listing. + public let entityId: String? + /// The revision of the data product listing. + public let entityRevision: String? + /// The glossary terms of the data product listing. + public let glossaryTerms: [DetailedGlossaryTerm]? 
+ /// The name of the data product listing. + public let name: String? + + public init(assetListings: [AssetInDataProductListingItem]? = nil, description: String? = nil, entityId: String? = nil, entityRevision: String? = nil, glossaryTerms: [DetailedGlossaryTerm]? = nil, name: String? = nil) { + self.assetListings = assetListings + self.description = description + self.entityId = entityId + self.entityRevision = entityRevision + self.glossaryTerms = glossaryTerms + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case assetListings = "assetListings" + case description = "description" + case entityId = "entityId" + case entityRevision = "entityRevision" + case glossaryTerms = "glossaryTerms" + case name = "name" + } + } + public struct SubscribedProject: AWSDecodableShape { /// The identifier of the project that has the subscription grant. public let id: String? @@ -10869,7 +12625,7 @@ extension DataZone { public let id: String /// The status of the subscription grant. public let status: SubscriptionGrantOverallStatus - /// The ID of the subscription grant. + /// The ID of the subscription. public let subscriptionId: String? /// The identifier of the target of the subscription grant. public let subscriptionTargetId: String @@ -10878,6 +12634,21 @@ extension DataZone { /// The Amazon DataZone user who updated the subscription grant. public let updatedBy: String? + public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? 
= nil) { + self.assets = assets + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.grantedEntity = grantedEntity + self.id = id + self.status = status + self.subscriptionId = nil + self.subscriptionTargetId = subscriptionTargetId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + @available(*, deprecated, message: "Members subscriptionId have been deprecated") public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionId: String? = nil, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? = nil) { self.assets = assets self.createdAt = createdAt @@ -11334,6 +13105,107 @@ extension DataZone { public init() {} } + public struct UpdateAssetFilterInput: AWSEncodableShape { + /// The ID of the data asset. + public let assetIdentifier: String + /// The configuration of the asset filter. + public let configuration: AssetFilterConfiguration? + /// The description of the asset filter. + public let description: String? + /// The ID of the domain where you want to update an asset filter. + public let domainIdentifier: String + /// The ID of the asset filter. + public let identifier: String + /// The name of the asset filter. + public let name: String? + + public init(assetIdentifier: String, configuration: AssetFilterConfiguration? = nil, description: String? = nil, domainIdentifier: String, identifier: String, name: String? = nil) { + self.assetIdentifier = assetIdentifier + self.configuration = configuration + self.description = description + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.name = name + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.assetIdentifier, key: "assetIdentifier") + try container.encodeIfPresent(self.configuration, forKey: .configuration) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.identifier, key: "identifier") + try container.encodeIfPresent(self.name, forKey: .name) + } + + public func validate(name: String) throws { + try self.validate(self.assetIdentifier, name: "assetIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case configuration = "configuration" + case description = "description" + case name = "name" + } + } + + public struct UpdateAssetFilterOutput: AWSDecodableShape { + /// The ID of the data asset. + public let assetId: String + /// The configuration of the asset filter. + public let configuration: AssetFilterConfiguration + /// The timestamp at which the asset filter was created. + public let createdAt: Date? + /// The description of the asset filter. + public let description: String? + /// The ID of the domain where the asset filter was created. + public let domainId: String + /// The column names of the asset filter. + public let effectiveColumnNames: [String]? + /// The row filter of the asset filter. + public let effectiveRowFilter: String? + /// The error message that is displayed if the action is not completed successfully. + public let errorMessage: String? + /// The ID of the asset filter. + public let id: String + /// The name of the asset filter. 
+ public let name: String + /// The status of the asset filter. + public let status: FilterStatus? + + public init(assetId: String, configuration: AssetFilterConfiguration, createdAt: Date? = nil, description: String? = nil, domainId: String, effectiveColumnNames: [String]? = nil, effectiveRowFilter: String? = nil, errorMessage: String? = nil, id: String, name: String, status: FilterStatus? = nil) { + self.assetId = assetId + self.configuration = configuration + self.createdAt = createdAt + self.description = description + self.domainId = domainId + self.effectiveColumnNames = effectiveColumnNames + self.effectiveRowFilter = effectiveRowFilter + self.errorMessage = errorMessage + self.id = id + self.name = name + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case assetId = "assetId" + case configuration = "configuration" + case createdAt = "createdAt" + case description = "description" + case domainId = "domainId" + case effectiveColumnNames = "effectiveColumnNames" + case effectiveRowFilter = "effectiveRowFilter" + case errorMessage = "errorMessage" + case id = "id" + case name = "name" + case status = "status" + } + } + public struct UpdateDataSourceInput: AWSEncodableShape { /// The asset forms to be updated as part of the UpdateDataSource action. public let assetFormsInput: [FormInput]? @@ -12345,6 +14217,21 @@ extension DataZone { /// The Amazon DataZone user who updated the subscription grant status. public let updatedBy: String? + public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? 
= nil) { + self.assets = assets + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.grantedEntity = grantedEntity + self.id = id + self.status = status + self.subscriptionId = nil + self.subscriptionTargetId = subscriptionTargetId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + @available(*, deprecated, message: "Members subscriptionId have been deprecated") public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionId: String? = nil, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? = nil) { self.assets = assets self.createdAt = createdAt @@ -12761,19 +14648,6 @@ extension DataZone { } } - public struct ListingItem: AWSDecodableShape { - /// An asset published in an Amazon DataZone catalog. - public let assetListing: AssetListing? - - public init(assetListing: AssetListing? = nil) { - self.assetListing = assetListing - } - - private enum CodingKeys: String, CodingKey { - case assetListing = "assetListing" - } - } - public struct Model: AWSEncodableShape & AWSDecodableShape { /// Indicates the smithy model of the API. public let smithy: String? @@ -12792,42 +14666,33 @@ extension DataZone { } } - public struct ProvisioningProperties: AWSDecodableShape { - /// The cloud formation properties included as part of the provisioning properties of an environment blueprint. - public let cloudFormation: CloudFormationProperties? - - public init(cloudFormation: CloudFormationProperties? = nil) { - self.cloudFormation = cloudFormation - } + public struct ProvisioningConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The Lake Formation configuration of the Data Lake blueprint. + public let lakeFormationConfiguration: LakeFormationConfiguration? 
- private enum CodingKeys: String, CodingKey { - case cloudFormation = "cloudFormation" + public init(lakeFormationConfiguration: LakeFormationConfiguration? = nil) { + self.lakeFormationConfiguration = lakeFormationConfiguration } - } - public struct SearchResultItem: AWSDecodableShape { - /// The asset listing included in the results of the SearchListings action. - public let assetListing: AssetListingItem? - - public init(assetListing: AssetListingItem? = nil) { - self.assetListing = assetListing + public func validate(name: String) throws { + try self.lakeFormationConfiguration?.validate(name: "\(name).lakeFormationConfiguration") } private enum CodingKeys: String, CodingKey { - case assetListing = "assetListing" + case lakeFormationConfiguration = "lakeFormationConfiguration" } } - public struct SubscribedListingItem: AWSDecodableShape { - /// The asset for which the subscription grant is created. - public let assetListing: SubscribedAssetListing? + public struct ProvisioningProperties: AWSDecodableShape { + /// The cloud formation properties included as part of the provisioning properties of an environment blueprint. + public let cloudFormation: CloudFormationProperties? - public init(assetListing: SubscribedAssetListing? = nil) { - self.assetListing = assetListing + public init(cloudFormation: CloudFormationProperties? = nil) { + self.cloudFormation = cloudFormation } private enum CodingKeys: String, CodingKey { - case assetListing = "assetListing" + case cloudFormation = "cloudFormation" } } diff --git a/Sources/Soto/Services/DocDB/DocDB_api.swift b/Sources/Soto/Services/DocDB/DocDB_api.swift index 37ce13ec29..f5eed887b9 100644 --- a/Sources/Soto/Services/DocDB/DocDB_api.swift +++ b/Sources/Soto/Services/DocDB/DocDB_api.swift @@ -559,6 +559,19 @@ public struct DocDB: AWSService { ) } + /// Promotes the specified secondary DB cluster to be the primary DB cluster in the global cluster when failing over a global cluster occurs. 
Use this operation to respond to an unplanned event, such as a regional disaster in the primary region. Failing over can result in a loss of write transaction data that wasn't replicated to the chosen secondary before the failover event occurred. However, the recovery process that promotes a DB instance on the chosen secondary DB cluster to be the primary writer DB instance guarantees that the data is in a transactionally consistent state. + @Sendable + public func failoverGlobalCluster(_ input: FailoverGlobalClusterMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> FailoverGlobalClusterResult { + return try await self.client.execute( + operation: "FailoverGlobalCluster", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists all tags on an Amazon DocumentDB resource. @Sendable public func listTagsForResource(_ input: ListTagsForResourceMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> TagListMessage { diff --git a/Sources/Soto/Services/DocDB/DocDB_shapes.swift b/Sources/Soto/Services/DocDB/DocDB_shapes.swift index 99c28ece88..6012e62e10 100644 --- a/Sources/Soto/Services/DocDB/DocDB_shapes.swift +++ b/Sources/Soto/Services/DocDB/DocDB_shapes.swift @@ -2425,6 +2425,52 @@ extension DocDB { } } + public struct FailoverGlobalClusterMessage: AWSEncodableShape { + /// Specifies whether to allow data loss for this global cluster operation. Allowing data loss triggers a global failover operation. If you don't specify AllowDataLoss, the global cluster operation defaults to a switchover. Constraints: Can't be specified together with the Switchover parameter. + public let allowDataLoss: Bool? + /// The identifier of the Amazon DocumentDB global cluster to apply this operation. The identifier is the unique key assigned by the user when the cluster is created. In other words, it's the name of the global cluster. Constraints: Must match the identifier of an existing global cluster. 
Minimum length of 1. Maximum length of 255. Pattern: [A-Za-z][0-9A-Za-z-:._]* + public let globalClusterIdentifier: String? + /// Specifies whether to switch over this global database cluster. Constraints: Can't be specified together with the AllowDataLoss parameter. + public let switchover: Bool? + /// The identifier of the secondary Amazon DocumentDB cluster that you want to promote to the primary for the global cluster. Use the Amazon Resource Name (ARN) for the identifier so that Amazon DocumentDB can locate the cluster in its Amazon Web Services region. Constraints: Must match the identifier of an existing secondary cluster. Minimum length of 1. Maximum length of 255. Pattern: [A-Za-z][0-9A-Za-z-:._]* + public let targetDbClusterIdentifier: String? + + public init(allowDataLoss: Bool? = nil, globalClusterIdentifier: String? = nil, switchover: Bool? = nil, targetDbClusterIdentifier: String? = nil) { + self.allowDataLoss = allowDataLoss + self.globalClusterIdentifier = globalClusterIdentifier + self.switchover = switchover + self.targetDbClusterIdentifier = targetDbClusterIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.globalClusterIdentifier, name: "globalClusterIdentifier", parent: name, max: 255) + try self.validate(self.globalClusterIdentifier, name: "globalClusterIdentifier", parent: name, min: 1) + try self.validate(self.globalClusterIdentifier, name: "globalClusterIdentifier", parent: name, pattern: "^[A-Za-z][0-9A-Za-z-:._]*$") + try self.validate(self.targetDbClusterIdentifier, name: "targetDbClusterIdentifier", parent: name, max: 255) + try self.validate(self.targetDbClusterIdentifier, name: "targetDbClusterIdentifier", parent: name, min: 1) + try self.validate(self.targetDbClusterIdentifier, name: "targetDbClusterIdentifier", parent: name, pattern: "^[A-Za-z][0-9A-Za-z-:._]*$") + } + + private enum CodingKeys: String, CodingKey { + case allowDataLoss = "AllowDataLoss" + case globalClusterIdentifier = 
"GlobalClusterIdentifier" + case switchover = "Switchover" + case targetDbClusterIdentifier = "TargetDbClusterIdentifier" + } + } + + public struct FailoverGlobalClusterResult: AWSDecodableShape { + public let globalCluster: GlobalCluster? + + public init(globalCluster: GlobalCluster? = nil) { + self.globalCluster = globalCluster + } + + private enum CodingKeys: String, CodingKey { + case globalCluster = "GlobalCluster" + } + } + public struct Filter: AWSEncodableShape { public struct _ValuesEncoding: ArrayCoderProperties { public static let member = "Value" } @@ -3545,6 +3591,9 @@ extension DocDB { try self.validate(self.globalClusterIdentifier, name: "globalClusterIdentifier", parent: name, max: 255) try self.validate(self.globalClusterIdentifier, name: "globalClusterIdentifier", parent: name, min: 1) try self.validate(self.globalClusterIdentifier, name: "globalClusterIdentifier", parent: name, pattern: "^[A-Za-z][0-9A-Za-z-:._]*$") + try self.validate(self.targetDbClusterIdentifier, name: "targetDbClusterIdentifier", parent: name, max: 255) + try self.validate(self.targetDbClusterIdentifier, name: "targetDbClusterIdentifier", parent: name, min: 1) + try self.validate(self.targetDbClusterIdentifier, name: "targetDbClusterIdentifier", parent: name, pattern: "^[A-Za-z][0-9A-Za-z-:._]*$") } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift b/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift index ea944c6df8..ac675967fd 100644 --- a/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift +++ b/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift @@ -97,7 +97,7 @@ public struct DynamoDB: AWSService { // MARK: API Calls - /// This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch returns at most a single item. 
The entire batch must consist of either read statements or write statements, you cannot mix both in one batch. A HTTP 200 response does not mean that all statements in the BatchExecuteStatement succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each statement. + /// This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch returns at most a single item. For more information, see Running batch operations with PartiQL for DynamoDB . The entire batch must consist of either read statements or write statements, you cannot mix both in one batch. A HTTP 200 response does not mean that all statements in the BatchExecuteStatement succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each statement. @Sendable public func batchExecuteStatement(_ input: BatchExecuteStatementInput, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchExecuteStatementOutput { return try await self.client.execute( @@ -125,7 +125,7 @@ public struct DynamoDB: AWSService { ) } - /// The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types. BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. 
To update items, we recommend you use the UpdateItem action. The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response. If you use a programming language that supports concurrency, you can use threads to write items in parallel. 
Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application. Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit. If one or more of the following is true, DynamoDB rejects the entire batch write operation: One or more tables specified in the BatchWriteItem request does not exist. Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema. You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request. Your request contains at least two items with identical hash and range keys (which essentially is two put operations). There are more than 25 requests in the batch. Any individual item in a batch exceeds 400 KB. The total request size exceeds 16 MB. Any individual items with keys exceeding the key length limits. For a partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes. + /// The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. 
For more details on this distinction, see Naming Rules and Data Types. BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action. The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. For tables and indexes with provisioned capacity, if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. For all tables and indexes, if none of the items can be processed due to other throttling scenarios (such as exceeding partition level limits), then BatchWriteItem returns a ThrottlingException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. 
With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response. If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application. Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit. If one or more of the following is true, DynamoDB rejects the entire batch write operation: One or more tables specified in the BatchWriteItem request does not exist. Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema. You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request. Your request contains at least two items with identical hash and range keys (which essentially is two put operations). There are more than 25 requests in the batch. Any individual item in a batch exceeds 400 KB. The total request size exceeds 16 MB. Any individual items with keys exceeding the key length limits. 
For a partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes. @Sendable public func batchWriteItem(_ input: BatchWriteItemInput, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchWriteItemOutput { return try await self.client.execute( @@ -230,7 +230,7 @@ public struct DynamoDB: AWSService { ) } - /// The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. When you delete a table, any indexes on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use the DescribeTable action to check the status of the table. + /// The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). 
DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. For the full list of table states, see TableStatus. When you delete a table, any indexes on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use the DescribeTable action to check the status of the table. @Sendable public func deleteTable(_ input: DeleteTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTableOutput { return try await self.client.execute( @@ -288,7 +288,7 @@ public struct DynamoDB: AWSService { ) } - /// Returns the regional endpoint information. For more information on policy permissions, please see Internetwork traffic privacy. + /// Returns the regional endpoint information. For more information on policy permissions, please see Internetwork traffic privacy. @Sendable public func describeEndpoints(_ input: DescribeEndpointsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeEndpointsResponse { return try await self.client.execute( @@ -460,7 +460,7 @@ public struct DynamoDB: AWSService { ) } - /// This operation allows you to perform reads and singleton writes on data stored in DynamoDB, using PartiQL. For PartiQL reads (SELECT statement), if the total number of processed items exceeds the maximum dataset size limit of 1 MB, the read stops and results are returned to the user as a LastEvaluatedKey value to continue the read in a subsequent operation. If the filter criteria in WHERE clause does not match any data, the read will return an empty result set. A single SELECT statement response can return up to the maximum number of items (if using the Limit parameter) or a maximum of 1 MB of data (and then apply any filtering to the results using WHERE clause). 
If LastEvaluatedKey is present in the response, you need to paginate the result set. If NextToken is present, you need to paginate the result set and include NextToken. + /// This operation allows you to perform reads and singleton writes on data stored in DynamoDB, using PartiQL. For PartiQL reads (SELECT statement), if the total number of processed items exceeds the maximum dataset size limit of 1 MB, the read stops and results are returned to the user as a LastEvaluatedKey value to continue the read in a subsequent operation. If the filter criteria in WHERE clause does not match any data, the read will return an empty result set. A single SELECT statement response can return up to the maximum number of items (if using the Limit parameter) or a maximum of 1 MB of data (and then apply any filtering to the results using WHERE clause). If LastEvaluatedKey is present in the response, you need to paginate the result set. If NextToken is present, you need to paginate the result set and include NextToken. @Sendable public func executeStatement(_ input: ExecuteStatementInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ExecuteStatementOutput { return try await self.client.execute( @@ -542,7 +542,7 @@ public struct DynamoDB: AWSService { ) } - /// List DynamoDB backups that are associated with an Amazon Web Services account and weren't made with Amazon Web Services Backup. To list these backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a maximum number of entries to be returned in a page. In the request, start time is inclusive, but end time is exclusive. Note that these boundaries are for the time at which the original backup was requested. You can call ListBackups a maximum of five times per second. If you want to retrieve the complete list of backups made with Amazon Web Services Backup, use the Amazon Web Services Backup list API. 
+ /// List DynamoDB backups that are associated with an Amazon Web Services account and weren't made with Amazon Web Services Backup. To list these backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a maximum number of entries to be returned in a page. In the request, start time is inclusive, but end time is exclusive. Note that these boundaries are for the time at which the original backup was requested. You can call ListBackups a maximum of five times per second. If you want to retrieve the complete list of backups made with Amazon Web Services Backup, use the Amazon Web Services Backup list API. @Sendable public func listBackups(_ input: ListBackupsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListBackupsOutput { return try await self.client.execute( @@ -716,7 +716,7 @@ public struct DynamoDB: AWSService { ) } - /// The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation. If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey value is also returned and the requestor can use the LastEvaluatedKey to continue the scan in a subsequent operation. Each scan response also includes number of items that were scanned (ScannedCount) as part of the request. If using a FilterExpression, a scan result can result in no items meeting the criteria and the Count will result in zero. If you did not use a FilterExpression in the scan request, then Count is the same as ScannedCount. Count and ScannedCount only return the count of items specific to a single scan request and, unless the table is less than 1MB, do not represent the total number of items in the table. 
A single Scan operation first reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey is present in the response, pagination is required to complete the full table scan. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide. Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide. By default, a Scan uses eventually consistent reads when accessing the items in a table. Therefore, the results from an eventually consistent Scan may not include the latest item changes at the time the scan iterates through each item in the table. If you require a strongly consistent read of each item as the scan iterates through the items in the table, you can set the ConsistentRead parameter to true. Strong consistency only relates to the consistency of the read at the item level. DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan see a consistent snapshot of the table when the scan operation was requested. + /// The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation. If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey value is also returned and the requestor can use the LastEvaluatedKey to continue the scan in a subsequent operation. 
Each scan response also includes number of items that were scanned (ScannedCount) as part of the request. If using a FilterExpression, a scan result can result in no items meeting the criteria and the Count will result in zero. If you did not use a FilterExpression in the scan request, then Count is the same as ScannedCount. Count and ScannedCount only return the count of items specific to a single scan request and, unless the table is less than 1MB, do not represent the total number of items in the table. A single Scan operation first reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey is present in the response, pagination is required to complete the full table scan. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide. Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide. By default, a Scan uses eventually consistent reads when accessing the items in a table. Therefore, the results from an eventually consistent Scan may not include the latest item changes at the time the scan iterates through each item in the table. If you require a strongly consistent read of each item as the scan iterates through the items in the table, you can set the ConsistentRead parameter to true. Strong consistency only relates to the consistency of the read at the item level. DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan see a consistent snapshot of the table when the scan operation was requested. 
@Sendable public func scan(_ input: ScanInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ScanOutput { return try await self.client.execute( @@ -819,7 +819,7 @@ public struct DynamoDB: AWSService { ) } - /// Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units. This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). If you are using global tables Version 2019.11.21 you can use UpdateTable instead. Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas. If global secondary indexes are specified, then the following conditions must also be met: The global secondary indexes must have the same name. The global secondary indexes must have the same hash key and sort key (if present). The global secondary indexes must have the same provisioned and maximum write capacity units. + /// Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. 
Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units. This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). If you are using global tables Version 2019.11.21 you can use UpdateTable instead. Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas. If global secondary indexes are specified, then the following conditions must also be met: The global secondary indexes must have the same name. The global secondary indexes must have the same hash key and sort key (if present). The global secondary indexes must have the same provisioned and maximum write capacity units. @Sendable public func updateGlobalTable(_ input: UpdateGlobalTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateGlobalTableOutput { return try await self.client.execute( @@ -1038,7 +1038,7 @@ extension DynamoDB { ) } - /// The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation. 
If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey value is also returned and the requestor can use the LastEvaluatedKey to continue the scan in a subsequent operation. Each scan response also includes number of items that were scanned (ScannedCount) as part of the request. If using a FilterExpression, a scan result can result in no items meeting the criteria and the Count will result in zero. If you did not use a FilterExpression in the scan request, then Count is the same as ScannedCount. Count and ScannedCount only return the count of items specific to a single scan request and, unless the table is less than 1MB, do not represent the total number of items in the table. A single Scan operation first reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey is present in the response, pagination is required to complete the full table scan. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide. Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide. By default, a Scan uses eventually consistent reads when accessing the items in a table. Therefore, the results from an eventually consistent Scan may not include the latest item changes at the time the scan iterates through each item in the table. If you require a strongly consistent read of each item as the scan iterates through the items in the table, you can set the ConsistentRead parameter to true. Strong consistency only relates to the consistency of the read at the item level. 
DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan see a consistent snapshot of the table when the scan operation was requested. + /// The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation. If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey value is also returned and the requestor can use the LastEvaluatedKey to continue the scan in a subsequent operation. Each scan response also includes number of items that were scanned (ScannedCount) as part of the request. If using a FilterExpression, a scan result can result in no items meeting the criteria and the Count will result in zero. If you did not use a FilterExpression in the scan request, then Count is the same as ScannedCount. Count and ScannedCount only return the count of items specific to a single scan request and, unless the table is less than 1MB, do not represent the total number of items in the table. A single Scan operation first reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey is present in the response, pagination is required to complete the full table scan. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide. Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide. 
By default, a Scan uses eventually consistent reads when accessing the items in a table. Therefore, the results from an eventually consistent Scan may not include the latest item changes at the time the scan iterates through each item in the table. If you require a strongly consistent read of each item as the scan iterates through the items in the table, you can set the ConsistentRead parameter to true. Strong consistency only relates to the consistency of the read at the item level. DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan see a consistent snapshot of the table when the scan operation was requested. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift b/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift index 633c0c142e..1e2415fcda 100644 --- a/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift +++ b/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift @@ -850,7 +850,7 @@ extension DynamoDB { public struct BatchExecuteStatementOutput: AWSDecodableShape { /// The capacity units consumed by the entire operation. The values of the list are ordered according to the ordering of the statements. public let consumedCapacity: [ConsumedCapacity]? - /// The response to each PartiQL statement in the batch. The values of the list are ordered according to the ordering of the request statements. + /// The response to each PartiQL statement in the batch. The values of the list are ordered according to the ordering of the request statements. public let responses: [BatchStatementResponse]? public init(consumedCapacity: [ConsumedCapacity]? = nil, responses: [BatchStatementResponse]? = nil) { @@ -4255,7 +4255,7 @@ extension DynamoDB { public let returnConsumedCapacity: ReturnConsumedCapacity? 
/// Specifies the order for index traversal: If true (default), the traversal is performed in ascending order; if false, the traversal is performed in descending order. Items with the same partition key value are stored in sorted order by sort key. If the sort key data type is Number, the results are stored in numeric order. For type String, the results are stored in order of UTF-8 bytes. For type Binary, DynamoDB treats each byte of the binary data as unsigned. If ScanIndexForward is true, DynamoDB returns the results in the order in which they are stored (by sort key value). This is the default behavior. If ScanIndexForward is false, DynamoDB reads the results in reverse order by sort key value, and then returns the results to the client. public let scanIndexForward: Bool? - /// The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index. ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index, DynamoDB fetches the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required. ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES. COUNT - Returns the number of matching items, rather than the matching items themselves. Note that this uses the same quantity of read capacity units as getting the items, and is subject to the same item size calculations. SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. 
This return value is equivalent to specifying ProjectionExpression without specifying any value for Select. If you query or scan a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB fetches each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency. If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table. If neither Select nor ProjectionExpression are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and ProjectionExpression together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying ProjectionExpression without any value for Select.) If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error. + /// The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index. ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index, DynamoDB fetches the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required. ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. 
If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES. COUNT - Returns the number of matching items, rather than the matching items themselves. Note that this uses the same quantity of read capacity units as getting the items, and is subject to the same item size calculations. SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. This return value is equivalent to specifying ProjectionExpression without specifying any value for Select. If you query or scan a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB fetches each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency. If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table. If neither Select nor ProjectionExpression are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and ProjectionExpression together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying ProjectionExpression without any value for Select.) If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error. public let select: Select? /// The name of the table containing the requested items. You can also provide the Amazon Resource Name (ARN) of the table in this parameter. public let tableName: String @@ -5035,7 +5035,7 @@ extension DynamoDB { public let scanFilter: [String: Condition]? 
/// For a parallel Scan request, Segment identifies an individual segment to be scanned by an application worker. Segment IDs are zero-based, so the first segment is always 0. For example, if you want to use four application threads to scan a table or an index, then the first thread specifies a Segment value of 0, the second thread specifies 1, and so on. The value of LastEvaluatedKey returned from a parallel Scan request must be used as ExclusiveStartKey with the same segment ID in a subsequent Scan operation. The value for Segment must be greater than or equal to 0, and less than the value provided for TotalSegments. If you provide Segment, you must also provide TotalSegments. public let segment: Int? - /// The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index. ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index, DynamoDB fetches the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required. ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES. COUNT - Returns the number of matching items, rather than the matching items themselves. Note that this uses the same quantity of read capacity units as getting the items, and is subject to the same item size calculations. SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. This return value is equivalent to specifying ProjectionExpression without specifying any value for Select. 
If you query or scan a local secondary index and request only attributes that are projected into that index, the operation reads only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB fetches each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency. If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table. If neither Select nor ProjectionExpression are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and ProjectionExpression together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying ProjectionExpression without any value for Select.) If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error. + /// The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index. ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index, DynamoDB fetches the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required. ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES. 
COUNT - Returns the number of matching items, rather than the matching items themselves. Note that this uses the same quantity of read capacity units as getting the items, and is subject to the same item size calculations. SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. This return value is equivalent to specifying ProjectionExpression without specifying any value for Select. If you query or scan a local secondary index and request only attributes that are projected into that index, the operation reads only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB fetches each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency. If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table. If neither Select nor ProjectionExpression are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and ProjectionExpression together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying ProjectionExpression without any value for Select.) If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error. public let select: Select? /// The name of the table containing the requested items or if you provide IndexName, the name of the table to which that index belongs. You can also provide the Amazon Resource Name (ARN) of the table in this parameter. public let tableName: String @@ -5113,7 +5113,7 @@ extension DynamoDB { } public struct ScanOutput: AWSDecodableShape { - /// The capacity units consumed by the Scan operation. 
The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide. + /// The capacity units consumed by the Scan operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide. public let consumedCapacity: ConsumedCapacity? /// The number of items in the response. If you set ScanFilter in the request, then Count is the number of items returned after the filter was applied, and ScannedCount is the number of matching items before the filter was applied. If you did not use a filter in the request, then Count is the same as ScannedCount. public let count: Int? diff --git a/Sources/Soto/Services/EC2/EC2_api.swift b/Sources/Soto/Services/EC2/EC2_api.swift index 01841fcaa2..9d76ea13bf 100644 --- a/Sources/Soto/Services/EC2/EC2_api.swift +++ b/Sources/Soto/Services/EC2/EC2_api.swift @@ -866,6 +866,20 @@ public struct EC2: AWSService { ) } + /// Create a new Capacity Reservation by splitting the available capacity of the source Capacity Reservation. The new Capacity Reservation will have the same attributes as the source Capacity Reservation except for tags. The source Capacity Reservation must be active and owned by your Amazon Web Services account. 
+ /// + @Sendable + public func createCapacityReservationBySplitting(_ input: CreateCapacityReservationBySplittingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCapacityReservationBySplittingResult { + return try await self.client.execute( + operation: "CreateCapacityReservationBySplitting", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a Capacity Reservation Fleet. For more information, see Create a /// Capacity Reservation Fleet in the /// Amazon EC2 User Guide. @@ -3077,7 +3091,7 @@ public struct EC2: AWSService { ) } - /// Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide. When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for three days after the transfers have been accepted. + /// Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide. When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. 
Accepted transfers are visible to the source account for 14 days after the transfers have been accepted. @Sendable public func describeAddressTransfers(_ input: DescribeAddressTransfersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAddressTransfersResult { return try await self.client.execute( @@ -6648,11 +6662,10 @@ public struct EC2: AWSService { ) } - /// Modifies a Capacity Reservation's capacity and the conditions under which it is to be released. You - /// cannot change a Capacity Reservation's instance type, EBS optimization, instance store settings, - /// platform, Availability Zone, or instance eligibility. If you need to modify any of these - /// attributes, we recommend that you cancel the Capacity Reservation, and then create a new one with - /// the required attributes. + /// Modifies a Capacity Reservation's capacity, instance eligibility, and the conditions under which it is to be released. You + /// can't modify a Capacity Reservation's instance type, EBS optimization, platform, instance store settings, Availability Zone, or + /// tenancy. If you need to modify any of these attributes, we recommend that you cancel the Capacity Reservation, and then create a new one with + /// the required attributes. For more information, see Modify an active Capacity Reservation. @Sendable public func modifyCapacityReservation(_ input: ModifyCapacityReservationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ModifyCapacityReservationResult { return try await self.client.execute( @@ -7518,6 +7531,20 @@ public struct EC2: AWSService { ) } + /// Move available capacity from a source Capacity Reservation to a destination Capacity Reservation. 
The source Capacity Reservation and the destination Capacity Reservation must be active, owned by your Amazon Web Services account, and share the following: + /// Instance type Platform Availability Zone Tenancy Placement group Capacity Reservation end time - At specific time or Manually. + @Sendable + public func moveCapacityReservationInstances(_ input: MoveCapacityReservationInstancesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> MoveCapacityReservationInstancesResult { + return try await self.client.execute( + operation: "MoveCapacityReservationInstances", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Provisions an IPv4 or IPv6 address range for use with your Amazon Web Services resources through bring your own IP addresses (BYOIP) and creates a corresponding address pool. After the address range is provisioned, it is ready to be advertised using AdvertiseByoipCidr. Amazon Web Services verifies that you own the address range and are authorized to advertise it. You must ensure that the address range is registered to you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. For more information, see Bring your own IP addresses (BYOIP) in the Amazon EC2 User Guide. Provisioning an address range is an asynchronous operation, so the call returns immediately, but the address range is not ready to use until its status changes from pending-provision to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. To allocate an Elastic IP address from your IPv4 address pool, use AllocateAddress with either the specific address from the address pool or the ID of the address pool. 
@Sendable public func provisionByoipCidr(_ input: ProvisionByoipCidrRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ProvisionByoipCidrResult { @@ -8422,7 +8449,7 @@ extension EC2 { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension EC2 { - /// Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide. When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for three days after the transfers have been accepted. + /// Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide. When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for 14 days after the transfers have been accepted. /// Return PaginatorSequence for operation. 
/// /// - Parameters: diff --git a/Sources/Soto/Services/EC2/EC2_shapes.swift b/Sources/Soto/Services/EC2/EC2_shapes.swift index d25fa481f0..d49892eb3e 100644 --- a/Sources/Soto/Services/EC2/EC2_shapes.swift +++ b/Sources/Soto/Services/EC2/EC2_shapes.swift @@ -1938,6 +1938,13 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum IpSource: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case amazon = "amazon" + case byoip = "byoip" + case none = "none" + public var description: String { return self.rawValue } + } + public enum IpamAddressHistoryResourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case eip = "eip" case instance = "instance" @@ -2085,6 +2092,13 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum IpamResourceCidrIpSource: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case amazon = "amazon" + case byoip = "byoip" + case none = "none" + public var description: String { return self.rawValue } + } + public enum IpamResourceDiscoveryAssociationState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case associateComplete = "associate-complete" case associateFailed = "associate-failed" @@ -2168,6 +2182,12 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum Ipv6AddressAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `private` = "private" + case `public` = "public" + public var description: String { return self.rawValue } + } + public enum Ipv6SupportValue: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disable = "disable" case enable = "enable" @@ -2733,7 +2753,6 @@ extension EC2 { case volume = "volume" case vpc = "vpc" case vpcBlockPublicAccessExclusion = "vpc-block-public-access-exclusion" - case vpcEncryptionControl = 
"vpc-encryption-control" case vpcEndpoint = "vpc-endpoint" case vpcEndpointConnection = "vpc-endpoint-connection" case vpcEndpointConnectionDeviceType = "vpc-endpoint-connection-device-type" @@ -4574,7 +4593,7 @@ extension EC2 { public let coreNetworkArn: String? /// The destination IPv4 address, in CIDR notation. public let destinationCidr: String? - /// The prefix of the Amazon Web Service. + /// The prefix of the Amazon Web Services service. public let destinationPrefixListId: String? /// The ID of an egress-only internet gateway. public let egressOnlyInternetGatewayId: String? @@ -8655,6 +8674,65 @@ extension EC2 { } } + public struct CreateCapacityReservationBySplittingRequest: AWSEncodableShape { + public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } + + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency. + public let clientToken: String? + /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// The number of instances to split from the source Capacity Reservation. + /// + public let instanceCount: Int? + /// The ID of the Capacity Reservation from which you want to split the available capacity. + /// + public let sourceCapacityReservationId: String? + /// The tags to apply to the new Capacity Reservation. + /// + @OptionalCustomCoding> + public var tagSpecifications: [TagSpecification]? + + public init(clientToken: String? = CreateCapacityReservationBySplittingRequest.idempotencyToken(), dryRun: Bool? = nil, instanceCount: Int? = nil, sourceCapacityReservationId: String? = nil, tagSpecifications: [TagSpecification]? 
= nil) { + self.clientToken = clientToken + self.dryRun = dryRun + self.instanceCount = instanceCount + self.sourceCapacityReservationId = sourceCapacityReservationId + self.tagSpecifications = tagSpecifications + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case dryRun = "DryRun" + case instanceCount = "InstanceCount" + case sourceCapacityReservationId = "SourceCapacityReservationId" + case tagSpecifications = "TagSpecification" + } + } + + public struct CreateCapacityReservationBySplittingResult: AWSDecodableShape { + /// Information about the destination Capacity Reservation. + /// + public let destinationCapacityReservation: CapacityReservation? + /// The number of instances in the new Capacity Reservation. The number of instances in the source Capacity Reservation was reduced by this amount. + /// + public let instanceCount: Int? + /// Information about the source Capacity Reservation. + /// + public let sourceCapacityReservation: CapacityReservation? + + public init(destinationCapacityReservation: CapacityReservation? = nil, instanceCount: Int? = nil, sourceCapacityReservation: CapacityReservation? = nil) { + self.destinationCapacityReservation = destinationCapacityReservation + self.instanceCount = instanceCount + self.sourceCapacityReservation = sourceCapacityReservation + } + + private enum CodingKeys: String, CodingKey { + case destinationCapacityReservation = "destinationCapacityReservation" + case instanceCount = "instanceCount" + case sourceCapacityReservation = "sourceCapacityReservation" + } + } + public struct CreateCapacityReservationFleetRequest: AWSEncodableShape { public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } @@ -10103,6 +10181,8 @@ extension EC2 { public let description: String? /// A check for whether you have the required permissions for the action without actually making the request and provides an error response. 
If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? + /// Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default. + public let enablePrivateGua: Bool? /// The operating Regions for the IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions. For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. @OptionalCustomCoding> public var operatingRegions: [AddIpamOperatingRegion]? @@ -10112,10 +10192,11 @@ extension EC2 { /// IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab. public let tier: IpamTier? - public init(clientToken: String? = CreateIpamRequest.idempotencyToken(), description: String? = nil, dryRun: Bool? = nil, operatingRegions: [AddIpamOperatingRegion]? = nil, tagSpecifications: [TagSpecification]? = nil, tier: IpamTier? = nil) { + public init(clientToken: String? = CreateIpamRequest.idempotencyToken(), description: String? = nil, dryRun: Bool? = nil, enablePrivateGua: Bool? = nil, operatingRegions: [AddIpamOperatingRegion]? = nil, tagSpecifications: [TagSpecification]? = nil, tier: IpamTier? 
= nil) { self.clientToken = clientToken self.description = description self.dryRun = dryRun + self.enablePrivateGua = enablePrivateGua self.operatingRegions = operatingRegions self.tagSpecifications = tagSpecifications self.tier = tier @@ -10129,6 +10210,7 @@ extension EC2 { case clientToken = "ClientToken" case description = "Description" case dryRun = "DryRun" + case enablePrivateGua = "EnablePrivateGua" case operatingRegions = "OperatingRegion" case tagSpecifications = "TagSpecification" case tier = "Tier" @@ -10945,7 +11027,7 @@ extension EC2 { public struct CreateNetworkInterfacePermissionRequest: AWSEncodableShape { /// The Amazon Web Services account ID. public let awsAccountId: String? - /// The Amazon Web Service. Currently not supported. + /// The Amazon Web Services service. Currently not supported. public let awsService: String? /// Checks whether you have the required permissions for the action, without actually making the request, /// and provides an error response. If you have the required permissions, the error response is DryRunOperation. @@ -21149,7 +21231,7 @@ extension EC2 { /// One or more filters. network-interface-permission.network-interface-permission-id - The ID of the /// permission. network-interface-permission.network-interface-id - The ID of - /// the network interface. network-interface-permission.aws-account-id - The Amazon Web Services account ID. network-interface-permission.aws-service - The Amazon Web Service. network-interface-permission.permission - The type of + /// the network interface. network-interface-permission.aws-account-id - The Amazon Web Services account ID. network-interface-permission.aws-service - The Amazon Web Services service. network-interface-permission.permission - The type of /// permission (INSTANCE-ATTACH | /// EIP-ASSOCIATE). @OptionalCustomCoding> @@ -21228,7 +21310,7 @@ extension EC2 { /// nat_gateway | network_load_balancer | /// quicksight | /// transit_gateway | trunk | - /// vpc_endpoint). 
mac-address - The MAC address of the network interface. network-interface-id - The ID of the network interface. owner-id - The Amazon Web Services account ID of the network interface owner. private-dns-name - The private DNS name of the network interface (IPv4). private-ip-address - The private IPv4 address or addresses of the network interface. requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface. requester-managed - Indicates whether the network interface is being managed by an Amazon Web Service + /// vpc_endpoint). mac-address - The MAC address of the network interface. network-interface-id - The ID of the network interface. owner-id - The Amazon Web Services account ID of the network interface owner. private-dns-name - The private DNS name of the network interface (IPv4). private-ip-address - The private IPv4 address or addresses of the network interface. requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface. requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services service /// (for example, Amazon Web Services Management Console, Auto Scaling, and so on). source-dest-check - Indicates whether the network interface performs source/destination checking. /// A value of true means checking is enabled, and false means checking is disabled. /// The value must be false for the network interface to perform network address translation (NAT) in your VPC. status - The status of the network interface. If the network interface is not attached to an instance, the status is available; @@ -21852,7 +21934,7 @@ extension EC2 { /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. 
public let dryRun: Bool? /// The filters. association.gateway-id - The ID of the gateway involved in the - /// association. association.route-table-association-id - The ID of an association ID for the route table. association.route-table-id - The ID of the route table involved in the association. association.subnet-id - The ID of the subnet involved in the association. association.main - Indicates whether the route table is the main route table for the VPC (true | false). Route tables that do not have an association ID are not returned in the response. owner-id - The ID of the Amazon Web Services account that owns the route table. route-table-id - The ID of the route table. route.destination-cidr-block - The IPv4 CIDR range specified in a route in the table. route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route table. route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Service specified in a route in the table. route.egress-only-internet-gateway-id - The ID of an egress-only Internet gateway specified in a route in the route table. route.gateway-id - The ID of a gateway specified in a route in the table. route.instance-id - The ID of an instance specified in a route in the table. route.nat-gateway-id - The ID of a NAT gateway. route.transit-gateway-id - The ID of a transit gateway. route.origin - Describes how the route was created. CreateRouteTable indicates that the route was automatically created when the route table was created; CreateRoute indicates that the route was manually added to the route table; EnableVgwRoutePropagation indicates that the route was propagated by route propagation. route.state - The state of a route in the route table (active | blackhole). The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, the specified NAT instance has been terminated, and so on). 
route.vpc-peering-connection-id - The ID of a VPC peering + /// association. association.route-table-association-id - The ID of an association ID for the route table. association.route-table-id - The ID of the route table involved in the association. association.subnet-id - The ID of the subnet involved in the association. association.main - Indicates whether the route table is the main route table for the VPC (true | false). Route tables that do not have an association ID are not returned in the response. owner-id - The ID of the Amazon Web Services account that owns the route table. route-table-id - The ID of the route table. route.destination-cidr-block - The IPv4 CIDR range specified in a route in the table. route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route table. route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Services service specified in a route in the table. route.egress-only-internet-gateway-id - The ID of an egress-only Internet gateway specified in a route in the route table. route.gateway-id - The ID of a gateway specified in a route in the table. route.instance-id - The ID of an instance specified in a route in the table. route.nat-gateway-id - The ID of a NAT gateway. route.transit-gateway-id - The ID of a transit gateway. route.origin - Describes how the route was created. CreateRouteTable indicates that the route was automatically created when the route table was created; CreateRoute indicates that the route was manually added to the route table; EnableVgwRoutePropagation indicates that the route was propagated by route propagation. route.state - The state of a route in the route table (active | blackhole). The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, the specified NAT instance has been terminated, and so on). 
route.vpc-peering-connection-id - The ID of a VPC peering /// connection specified in a route in the table. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. vpc-id - The ID of the VPC for the route table. @OptionalCustomCoding> public var filters: [Filter]? @@ -28994,7 +29076,7 @@ extension EC2 { public struct FleetLaunchTemplateOverrides: AWSDecodableShape { /// The Availability Zone in which to launch the instances. public let availabilityZone: String? - /// The ID of the AMI. An AMI is required to launch an instance. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template. + /// The ID of the AMI in the format ami-17characters00000. Alternatively, you can specify a Systems Manager parameter, using one of the following formats. The Systems Manager parameter will resolve to an AMI ID on launch. To reference a public parameter: resolve:ssm:public-parameter To reference a parameter stored in the same account: resolve:ssm:parameter-name resolve:ssm:parameter-name:version-number resolve:ssm:parameter-name:label To reference a parameter shared from another Amazon Web Services account: resolve:ssm:parameter-ARN resolve:ssm:parameter-ARN:version-number resolve:ssm:parameter-ARN:label For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template. public let imageId: String? 
/// The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with those attributes. If you specify InstanceRequirements, you can't specify InstanceType. public let instanceRequirements: InstanceRequirements? @@ -29039,7 +29121,7 @@ extension EC2 { public struct FleetLaunchTemplateOverridesRequest: AWSEncodableShape { /// The Availability Zone in which to launch the instances. public let availabilityZone: String? - /// The ID of the AMI. An AMI is required to launch an instance. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template. + /// The ID of the AMI in the format ami-17characters00000. Alternatively, you can specify a Systems Manager parameter, using one of the following formats. The Systems Manager parameter will resolve to an AMI ID on launch. To reference a public parameter: resolve:ssm:public-parameter To reference a parameter stored in the same account: resolve:ssm:parameter-name resolve:ssm:parameter-name:version-number resolve:ssm:parameter-name:label To reference a parameter shared from another Amazon Web Services account: resolve:ssm:parameter-ARN resolve:ssm:parameter-ARN:version-number resolve:ssm:parameter-ARN:label For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template. public let imageId: String? /// The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with those attributes. If you specify InstanceRequirements, you can't specify InstanceType. public let instanceRequirements: InstanceRequirementsRequest? @@ -35582,6 +35664,8 @@ extension EC2 { public let defaultResourceDiscoveryId: String? /// The description for the IPAM. 
public let description: String? + /// Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default. + public let enablePrivateGua: Bool? /// The Amazon Resource Name (ARN) of the IPAM. public let ipamArn: String? /// The ID of the IPAM. @@ -35611,10 +35695,11 @@ extension EC2 { /// IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab. public let tier: IpamTier? - public init(defaultResourceDiscoveryAssociationId: String? = nil, defaultResourceDiscoveryId: String? = nil, description: String? = nil, ipamArn: String? = nil, ipamId: String? = nil, ipamRegion: String? = nil, operatingRegions: [IpamOperatingRegion]? = nil, ownerId: String? = nil, privateDefaultScopeId: String? = nil, publicDefaultScopeId: String? = nil, resourceDiscoveryAssociationCount: Int? = nil, scopeCount: Int? = nil, state: IpamState? = nil, stateMessage: String? = nil, tags: [Tag]? = nil, tier: IpamTier? = nil) { + public init(defaultResourceDiscoveryAssociationId: String? = nil, defaultResourceDiscoveryId: String? = nil, description: String? = nil, enablePrivateGua: Bool? = nil, ipamArn: String? = nil, ipamId: String? = nil, ipamRegion: String? = nil, operatingRegions: [IpamOperatingRegion]? = nil, ownerId: String? = nil, privateDefaultScopeId: String? = nil, publicDefaultScopeId: String? = nil, resourceDiscoveryAssociationCount: Int? = nil, scopeCount: Int? = nil, state: IpamState? = nil, stateMessage: String? = nil, tags: [Tag]? = nil, tier: IpamTier? 
= nil) { self.defaultResourceDiscoveryAssociationId = defaultResourceDiscoveryAssociationId self.defaultResourceDiscoveryId = defaultResourceDiscoveryId self.description = description + self.enablePrivateGua = enablePrivateGua self.ipamArn = ipamArn self.ipamId = ipamId self.ipamRegion = ipamRegion @@ -35634,6 +35719,7 @@ extension EC2 { case defaultResourceDiscoveryAssociationId = "defaultResourceDiscoveryAssociationId" case defaultResourceDiscoveryId = "defaultResourceDiscoveryId" case description = "description" + case enablePrivateGua = "enablePrivateGua" case ipamArn = "ipamArn" case ipamId = "ipamId" case ipamRegion = "ipamRegion" @@ -35844,6 +35930,8 @@ extension EC2 { public let availabilityZoneId: String? /// The resource discovery ID. public let ipamResourceDiscoveryId: String? + /// The source that allocated the IP address space. byoip or amazon indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none indicates private space. + public let ipSource: IpamResourceCidrIpSource? /// The percentage of IP address space in use. To convert the decimal to a percentage, multiply the decimal by 100. Note the following: For resources that are VPCs, this is the percentage of IP address space in the VPC that's taken up by subnet CIDRs. For resources that are subnets, if the subnet has an IPv4 CIDR provisioned to it, this is the percentage of IPv4 address space in the subnet that's in use. If the subnet has an IPv6 CIDR provisioned to it, the percentage of IPv6 address space in use is not represented. The percentage of IPv6 address space in use cannot currently be calculated. For resources that are public IPv4 pools, this is the percentage of IP address space in the pool that's been allocated to Elastic IP addresses (EIPs). public let ipUsage: Double? /// For elastic network interfaces, this is the status of whether or not the elastic network interface is attached. 
@@ -35866,9 +35954,10 @@ extension EC2 { /// The VPC ID. public let vpcId: String? - public init(availabilityZoneId: String? = nil, ipamResourceDiscoveryId: String? = nil, ipUsage: Double? = nil, networkInterfaceAttachmentStatus: IpamNetworkInterfaceAttachmentStatus? = nil, resourceCidr: String? = nil, resourceId: String? = nil, resourceOwnerId: String? = nil, resourceRegion: String? = nil, resourceTags: [IpamResourceTag]? = nil, resourceType: IpamResourceType? = nil, sampleTime: Date? = nil, vpcId: String? = nil) { + public init(availabilityZoneId: String? = nil, ipamResourceDiscoveryId: String? = nil, ipSource: IpamResourceCidrIpSource? = nil, ipUsage: Double? = nil, networkInterfaceAttachmentStatus: IpamNetworkInterfaceAttachmentStatus? = nil, resourceCidr: String? = nil, resourceId: String? = nil, resourceOwnerId: String? = nil, resourceRegion: String? = nil, resourceTags: [IpamResourceTag]? = nil, resourceType: IpamResourceType? = nil, sampleTime: Date? = nil, vpcId: String? = nil) { self.availabilityZoneId = availabilityZoneId self.ipamResourceDiscoveryId = ipamResourceDiscoveryId + self.ipSource = ipSource self.ipUsage = ipUsage self.networkInterfaceAttachmentStatus = networkInterfaceAttachmentStatus self.resourceCidr = resourceCidr @@ -35884,6 +35973,7 @@ extension EC2 { private enum CodingKeys: String, CodingKey { case availabilityZoneId = "availabilityZoneId" case ipamResourceDiscoveryId = "ipamResourceDiscoveryId" + case ipSource = "ipSource" case ipUsage = "ipUsage" case networkInterfaceAttachmentStatus = "networkInterfaceAttachmentStatus" case resourceCidr = "resourceCidr" @@ -39120,8 +39210,13 @@ extension EC2 { /// The number of instances for which to reserve capacity. The number of instances can't be increased or /// decreased by more than 1000 in a single request. public let instanceCount: Int? + /// The matching criteria (instance eligibility) that you want to use in the modified Capacity Reservation. 
If you change the instance eligibility of an existing Capacity Reservation from targeted to open, + /// any running instances that match the attributes of the Capacity Reservation, have the CapacityReservationPreference set to open, and + /// are not yet running in the Capacity Reservation, will automatically use the modified Capacity Reservation. + /// To modify the instance eligibility, the Capacity Reservation must be completely idle (zero usage). + public let instanceMatchCriteria: InstanceMatchCriteria? - public init(accept: Bool? = nil, additionalInfo: String? = nil, capacityReservationId: String? = nil, dryRun: Bool? = nil, endDate: Date? = nil, endDateType: EndDateType? = nil, instanceCount: Int? = nil) { + public init(accept: Bool? = nil, additionalInfo: String? = nil, capacityReservationId: String? = nil, dryRun: Bool? = nil, endDate: Date? = nil, endDateType: EndDateType? = nil, instanceCount: Int? = nil, instanceMatchCriteria: InstanceMatchCriteria? = nil) { self.accept = accept self.additionalInfo = additionalInfo self.capacityReservationId = capacityReservationId @@ -39129,6 +39224,7 @@ extension EC2 { self.endDate = endDate self.endDateType = endDateType self.instanceCount = instanceCount + self.instanceMatchCriteria = instanceMatchCriteria } private enum CodingKeys: String, CodingKey { @@ -39139,6 +39235,7 @@ extension EC2 { case endDate = "EndDate" case endDateType = "EndDateType" case instanceCount = "InstanceCount" + case instanceMatchCriteria = "InstanceMatchCriteria" } } @@ -40121,6 +40218,8 @@ extension EC2 { public let description: String? /// A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? + /// Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default. 
+ public let enablePrivateGua: Bool? /// The ID of the IPAM you want to modify. public let ipamId: String? /// The operating Regions to remove. @@ -40129,10 +40228,11 @@ extension EC2 { /// IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab. public let tier: IpamTier? - public init(addOperatingRegions: [AddIpamOperatingRegion]? = nil, description: String? = nil, dryRun: Bool? = nil, ipamId: String? = nil, removeOperatingRegions: [RemoveIpamOperatingRegion]? = nil, tier: IpamTier? = nil) { + public init(addOperatingRegions: [AddIpamOperatingRegion]? = nil, description: String? = nil, dryRun: Bool? = nil, enablePrivateGua: Bool? = nil, ipamId: String? = nil, removeOperatingRegions: [RemoveIpamOperatingRegion]? = nil, tier: IpamTier? = nil) { self.addOperatingRegions = addOperatingRegions self.description = description self.dryRun = dryRun + self.enablePrivateGua = enablePrivateGua self.ipamId = ipamId self.removeOperatingRegions = removeOperatingRegions self.tier = tier @@ -40147,6 +40247,7 @@ extension EC2 { case addOperatingRegions = "AddOperatingRegion" case description = "Description" case dryRun = "DryRun" + case enablePrivateGua = "EnablePrivateGua" case ipamId = "IpamId" case removeOperatingRegions = "RemoveOperatingRegion" case tier = "Tier" @@ -42499,6 +42600,62 @@ extension EC2 { } } + public struct MoveCapacityReservationInstancesRequest: AWSEncodableShape { + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency. + public let clientToken: String? + /// The ID of the Capacity Reservation that you want to move capacity into. + /// + public let destinationCapacityReservationId: String? + /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. 
If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// The number of instances that you want to move from the source Capacity Reservation. + /// + public let instanceCount: Int? + /// The ID of the Capacity Reservation from which you want to move capacity. + /// + public let sourceCapacityReservationId: String? + + public init(clientToken: String? = MoveCapacityReservationInstancesRequest.idempotencyToken(), destinationCapacityReservationId: String? = nil, dryRun: Bool? = nil, instanceCount: Int? = nil, sourceCapacityReservationId: String? = nil) { + self.clientToken = clientToken + self.destinationCapacityReservationId = destinationCapacityReservationId + self.dryRun = dryRun + self.instanceCount = instanceCount + self.sourceCapacityReservationId = sourceCapacityReservationId + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case destinationCapacityReservationId = "DestinationCapacityReservationId" + case dryRun = "DryRun" + case instanceCount = "InstanceCount" + case sourceCapacityReservationId = "SourceCapacityReservationId" + } + } + + public struct MoveCapacityReservationInstancesResult: AWSDecodableShape { + /// Information about the destination Capacity Reservation. + /// + public let destinationCapacityReservation: CapacityReservation? + /// The number of instances that were moved from the source Capacity Reservation to the destination Capacity Reservation. + /// + public let instanceCount: Int? + /// Information about the source Capacity Reservation. + /// + public let sourceCapacityReservation: CapacityReservation? + + public init(destinationCapacityReservation: CapacityReservation? = nil, instanceCount: Int? = nil, sourceCapacityReservation: CapacityReservation? 
= nil) { + self.destinationCapacityReservation = destinationCapacityReservation + self.instanceCount = instanceCount + self.sourceCapacityReservation = sourceCapacityReservation + } + + private enum CodingKeys: String, CodingKey { + case destinationCapacityReservation = "destinationCapacityReservation" + case instanceCount = "instanceCount" + case sourceCapacityReservation = "sourceCapacityReservation" + } + } + public struct MovingAddressStatus: AWSDecodableShape { /// The status of the Elastic IP address that's being moved or restored. public let moveStatus: MoveStatus? @@ -43407,7 +43564,7 @@ extension EC2 { public struct NetworkInterfacePermission: AWSDecodableShape { /// The Amazon Web Services account ID. public let awsAccountId: String? - /// The Amazon Web Service. + /// The Amazon Web Services service. public let awsService: String? /// The ID of the network interface. public let networkInterfaceId: String? @@ -44400,7 +44557,7 @@ extension EC2 { public struct PrefixList: AWSDecodableShape { public struct _CidrsEncoding: ArrayCoderProperties { public static let member = "item" } - /// The IP address range of the Amazon Web Service. + /// The IP address range of the Amazon Web Services service. @OptionalCustomCoding> public var cidrs: [String]? /// The ID of the prefix. @@ -44889,7 +45046,7 @@ extension EC2 { public let dryRun: Bool? /// The ID of the IPAM pool you would like to use to allocate this CIDR. public let ipamPoolId: String? - /// The netmask length of the CIDR you would like to allocate to the public IPv4 pool. + /// The netmask length of the CIDR you would like to allocate to the public IPv4 pool. The least specific netmask length you can define is 24. public let netmaskLength: Int? /// The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. 
For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide. public let networkBorderGroup: String? @@ -46409,7 +46566,7 @@ extension EC2 { public let hibernationOptions: LaunchTemplateHibernationOptionsRequest? /// The name or Amazon Resource Name (ARN) of an IAM instance profile. public let iamInstanceProfile: LaunchTemplateIamInstanceProfileSpecificationRequest? - /// The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch. Valid formats: ami-17characters00000 resolve:ssm:parameter-name resolve:ssm:parameter-name:version-number resolve:ssm:parameter-name:label resolve:ssm:public-parameter Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager parameter. If the launch template will be used by an EC2 Fleet or Spot Fleet, you must specify the AMI ID. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. + /// The ID of the AMI in the format ami-17characters00000. Alternatively, you can specify a Systems Manager parameter, using one of the following formats. The Systems Manager parameter will resolve to an AMI ID on launch. To reference a public parameter: resolve:ssm:public-parameter To reference a parameter stored in the same account: resolve:ssm:parameter-name resolve:ssm:parameter-name:version-number resolve:ssm:parameter-name:label To reference a parameter shared from another Amazon Web Services account: resolve:ssm:parameter-ARN resolve:ssm:parameter-ARN:version-number resolve:ssm:parameter-ARN:label For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. If the launch template will be used for an EC2 Fleet or Spot Fleet, note the following: Only EC2 Fleets of type instant support specifying a Systems Manager parameter. For EC2 Fleets of type maintain or request, or for Spot Fleets, you must specify the AMI ID. 
public let imageId: String? /// Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown). Default: stop public let instanceInitiatedShutdownBehavior: ShutdownBehavior? @@ -48021,7 +48178,7 @@ extension EC2 { public let destinationCidrBlock: String? /// The IPv6 CIDR block used for the destination match. public let destinationIpv6CidrBlock: String? - /// The prefix of the Amazon Web Service. + /// The prefix of the Amazon Web Services service. public let destinationPrefixListId: String? /// The ID of the egress-only internet gateway. public let egressOnlyInternetGatewayId: String? @@ -51266,19 +51423,27 @@ extension EC2 { public struct SubnetIpv6CidrBlockAssociation: AWSDecodableShape { /// The ID of the association. public let associationId: String? + /// The source that allocated the IP address space. byoip or amazon indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none indicates private space. + public let ipSource: IpSource? + /// Public IPv6 addresses are those advertised on the internet from Amazon Web Services. Private IP addresses are not and cannot be advertised on the internet from Amazon Web Services. + public let ipv6AddressAttribute: Ipv6AddressAttribute? /// The IPv6 CIDR block. public let ipv6CidrBlock: String? /// The state of the CIDR block. public let ipv6CidrBlockState: SubnetCidrBlockState? - public init(associationId: String? = nil, ipv6CidrBlock: String? = nil, ipv6CidrBlockState: SubnetCidrBlockState? = nil) { + public init(associationId: String? = nil, ipSource: IpSource? = nil, ipv6AddressAttribute: Ipv6AddressAttribute? = nil, ipv6CidrBlock: String? = nil, ipv6CidrBlockState: SubnetCidrBlockState? 
= nil) { self.associationId = associationId + self.ipSource = ipSource + self.ipv6AddressAttribute = ipv6AddressAttribute self.ipv6CidrBlock = ipv6CidrBlock self.ipv6CidrBlockState = ipv6CidrBlockState } private enum CodingKeys: String, CodingKey { case associationId = "associationId" + case ipSource = "ipSource" + case ipv6AddressAttribute = "ipv6AddressAttribute" case ipv6CidrBlock = "ipv6CidrBlock" case ipv6CidrBlockState = "ipv6CidrBlockState" } @@ -55276,6 +55441,10 @@ extension EC2 { public struct VpcIpv6CidrBlockAssociation: AWSDecodableShape { /// The association ID for the IPv6 CIDR block. public let associationId: String? + /// The source that allocated the IP address space. byoip or amazon indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none indicates private space. + public let ipSource: IpSource? + /// Public IPv6 addresses are those advertised on the internet from Amazon Web Services. Private IP addresses are not and cannot be advertised on the internet from Amazon Web Services. + public let ipv6AddressAttribute: Ipv6AddressAttribute? /// The IPv6 CIDR block. public let ipv6CidrBlock: String? /// Information about the state of the CIDR block. @@ -55285,8 +55454,10 @@ extension EC2 { /// The name of the unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses, for example, us-east-1-wl1-bos-wlz-1. public let networkBorderGroup: String? - public init(associationId: String? = nil, ipv6CidrBlock: String? = nil, ipv6CidrBlockState: VpcCidrBlockState? = nil, ipv6Pool: String? = nil, networkBorderGroup: String? = nil) { + public init(associationId: String? = nil, ipSource: IpSource? = nil, ipv6AddressAttribute: Ipv6AddressAttribute? = nil, ipv6CidrBlock: String? = nil, ipv6CidrBlockState: VpcCidrBlockState? = nil, ipv6Pool: String? = nil, networkBorderGroup: String? 
= nil) { self.associationId = associationId + self.ipSource = ipSource + self.ipv6AddressAttribute = ipv6AddressAttribute self.ipv6CidrBlock = ipv6CidrBlock self.ipv6CidrBlockState = ipv6CidrBlockState self.ipv6Pool = ipv6Pool @@ -55295,6 +55466,8 @@ extension EC2 { private enum CodingKeys: String, CodingKey { case associationId = "associationId" + case ipSource = "ipSource" + case ipv6AddressAttribute = "ipv6AddressAttribute" case ipv6CidrBlock = "ipv6CidrBlock" case ipv6CidrBlockState = "ipv6CidrBlockState" case ipv6Pool = "ipv6Pool" diff --git a/Sources/Soto/Services/ECR/ECR_api.swift b/Sources/Soto/Services/ECR/ECR_api.swift index 03b838b1b2..7810faf854 100644 --- a/Sources/Soto/Services/ECR/ECR_api.swift +++ b/Sources/Soto/Services/ECR/ECR_api.swift @@ -244,6 +244,19 @@ public struct ECR: AWSService { ) } + /// Creates a repository creation template. This template is used to define the settings for repositories created by Amazon ECR on your behalf. For example, repositories created through pull through cache actions. For more information, see Private repository creation templates in the Amazon Elastic Container Registry User Guide. + @Sendable + public func createRepositoryCreationTemplate(_ input: CreateRepositoryCreationTemplateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateRepositoryCreationTemplateResponse { + return try await self.client.execute( + operation: "CreateRepositoryCreationTemplate", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes the lifecycle policy associated with the specified repository. @Sendable public func deleteLifecyclePolicy(_ input: DeleteLifecyclePolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteLifecyclePolicyResponse { @@ -296,6 +309,19 @@ public struct ECR: AWSService { ) } + /// Deletes a repository creation template. 
+ @Sendable + public func deleteRepositoryCreationTemplate(_ input: DeleteRepositoryCreationTemplateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteRepositoryCreationTemplateResponse { + return try await self.client.execute( + operation: "DeleteRepositoryCreationTemplate", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes the repository policy associated with the specified repository. @Sendable public func deleteRepositoryPolicy(_ input: DeleteRepositoryPolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteRepositoryPolicyResponse { @@ -387,6 +413,32 @@ public struct ECR: AWSService { ) } + /// Returns details about the repository creation templates in a registry. The prefixes request parameter can be used to return the details for a specific repository creation template. + @Sendable + public func describeRepositoryCreationTemplates(_ input: DescribeRepositoryCreationTemplatesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeRepositoryCreationTemplatesResponse { + return try await self.client.execute( + operation: "DescribeRepositoryCreationTemplates", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves the basic scan type version name. + @Sendable + public func getAccountSetting(_ input: GetAccountSettingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAccountSettingResponse { + return try await self.client.execute( + operation: "GetAccountSetting", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves an authorization token. An authorization token represents your IAM authentication credentials and can be used to access any Amazon ECR registry that your IAM principal has access to. The authorization token is valid for 12 hours. 
The authorizationToken returned is a base64 encoded string that can be decoded and used in a docker login command to authenticate to a registry. The CLI offers an get-login-password command that simplifies the login process. For more information, see Registry authentication in the Amazon Elastic Container Registry User Guide. @Sendable public func getAuthorizationToken(_ input: GetAuthorizationTokenRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAuthorizationTokenResponse { @@ -517,6 +569,19 @@ public struct ECR: AWSService { ) } + /// Allows you to change the basic scan type version by setting the name parameter to either CLAIR or AWS_NATIVE. + @Sendable + public func putAccountSetting(_ input: PutAccountSettingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutAccountSettingResponse { + return try await self.client.execute( + operation: "PutAccountSetting", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates or updates the image manifest and tags associated with an image. When an image is pushed and all new image layers have been uploaded, the PutImage API is called once to create or update the image manifest and the tags associated with the image. This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images. @Sendable public func putImage(_ input: PutImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutImageResponse { @@ -595,7 +660,7 @@ public struct ECR: AWSService { ) } - /// Creates or updates the replication configuration for a registry. The existing replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process.
For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide. When configuring cross-account replication, the destination account must grant the source account permission to replicate. This permission is controlled using a registry permissions policy. For more information, see PutRegistryPolicy. + /// Creates or updates the replication configuration for a registry. The existing replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process. For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide. For more information on the custom role for replication, see Creating an IAM role for replication. When configuring cross-account replication, the destination account must grant the source account permission to replicate. This permission is controlled using a registry permissions policy. For more information, see PutRegistryPolicy. @Sendable public func putReplicationConfiguration(_ input: PutReplicationConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutReplicationConfigurationResponse { return try await self.client.execute( @@ -686,6 +751,19 @@ public struct ECR: AWSService { ) } + /// Updates an existing repository creation template. + @Sendable + public func updateRepositoryCreationTemplate(_ input: UpdateRepositoryCreationTemplateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateRepositoryCreationTemplateResponse { + return try await self.client.execute( + operation: "UpdateRepositoryCreationTemplate", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Uploads an image layer part to Amazon ECR. 
When an image is pushed, each new image layer is uploaded in parts. The maximum size of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API is called once per each new image layer part. This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images. @Sendable public func uploadLayerPart(_ input: UploadLayerPartRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UploadLayerPartResponse { @@ -802,6 +880,25 @@ extension ECR { ) } + /// Returns details about the repository creation templates in a registry. The prefixes request parameter can be used to return the details for a specific repository creation template. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func describeRepositoryCreationTemplatesPaginator( + _ input: DescribeRepositoryCreationTemplatesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.describeRepositoryCreationTemplates, + inputKey: \DescribeRepositoryCreationTemplatesRequest.nextToken, + outputKey: \DescribeRepositoryCreationTemplatesResponse.nextToken, + logger: logger + ) + } + /// Retrieves the results of the lifecycle policy preview request for the specified repository. /// Return PaginatorSequence for operation.
/// @@ -888,6 +985,16 @@ extension ECR.DescribeRepositoriesRequest: AWSPaginateToken { } } +extension ECR.DescribeRepositoryCreationTemplatesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> ECR.DescribeRepositoryCreationTemplatesRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + prefixes: self.prefixes + ) + } +} + extension ECR.GetLifecyclePolicyPreviewRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> ECR.GetLifecyclePolicyPreviewRequest { return .init( diff --git a/Sources/Soto/Services/ECR/ECR_shapes.swift b/Sources/Soto/Services/ECR/ECR_shapes.swift index b34bc9e0fa..5b777f8434 100644 --- a/Sources/Soto/Services/ECR/ECR_shapes.swift +++ b/Sources/Soto/Services/ECR/ECR_shapes.swift @@ -87,6 +87,12 @@ extension ECR { public var description: String { return self.rawValue } } + public enum RCTAppliedFor: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case pullThroughCache = "PULL_THROUGH_CACHE" + case replication = "REPLICATION" + public var description: String { return self.rawValue } + } + public enum ReplicationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case complete = "COMPLETE" case failed = "FAILED" @@ -501,7 +507,7 @@ extension ECR { public let registryId: String? /// The name of the upstream registry. public let upstreamRegistry: UpstreamRegistry? - /// The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry. 
Amazon ECR Public (ecr-public) - public.ecr.aws Docker Hub (docker-hub) - registry-1.docker.io Quay (quay) - quay.io Kubernetes (k8s) - registry.k8s.io GitHub Container Registry (github-container-registry) - ghcr.io Microsoft Azure Container Registry (azure-container-registry) - .azurecr.io GitLab Container Registry (gitlab-container-registry) - registry.gitlab.com + /// The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry. Amazon ECR Public (ecr-public) - public.ecr.aws Docker Hub (docker-hub) - registry-1.docker.io Quay (quay) - quay.io Kubernetes (k8s) - registry.k8s.io GitHub Container Registry (github-container-registry) - ghcr.io Microsoft Azure Container Registry (azure-container-registry) - .azurecr.io public let upstreamRegistryUrl: String public init(credentialArn: String? = nil, ecrRepositoryPrefix: String, registryId: String? = nil, upstreamRegistry: UpstreamRegistry? = nil, upstreamRegistryUrl: String) { @@ -564,6 +570,79 @@ extension ECR { } } + public struct CreateRepositoryCreationTemplateRequest: AWSEncodableShape { + /// A list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION + public let appliedFor: [RCTAppliedFor] + /// The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as the registry that you are configuring. Amazon ECR will assume your supplied role when the customRoleArn is specified. When this field isn't specified, Amazon ECR will use the service-linked role for the repository creation template. + public let customRoleArn: String? + /// A description for the repository creation template. + public let description: String? + /// The encryption configuration to use for repositories created using the template. 
+ public let encryptionConfiguration: EncryptionConfigurationForRepositoryCreationTemplate? + /// The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten. + public let imageTagMutability: ImageTagMutability? + /// The lifecycle policy to use for repositories created using the template. + public let lifecyclePolicy: String? + /// The repository namespace prefix to associate with the template. All repositories created using this namespace prefix will have the settings defined in this template applied. For example, a prefix of prod would apply to all repositories beginning with prod/. Similarly, a prefix of prod/team would apply to all repositories beginning with prod/team/. To apply a template to all repositories in your registry that don't have an associated creation template, you can use ROOT as the prefix. There is always an assumed / applied to the end of the prefix. If you specify ecr-public as the prefix, Amazon ECR treats that as ecr-public/. When using a pull through cache rule, the repository prefix you specify during rule creation is what you should specify as your repository creation template prefix as well. + public let prefix: String + /// The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions. + public let repositoryPolicy: String? + /// The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters. + public let resourceTags: [Tag]? 
+ + public init(appliedFor: [RCTAppliedFor], customRoleArn: String? = nil, description: String? = nil, encryptionConfiguration: EncryptionConfigurationForRepositoryCreationTemplate? = nil, imageTagMutability: ImageTagMutability? = nil, lifecyclePolicy: String? = nil, prefix: String, repositoryPolicy: String? = nil, resourceTags: [Tag]? = nil) { + self.appliedFor = appliedFor + self.customRoleArn = customRoleArn + self.description = description + self.encryptionConfiguration = encryptionConfiguration + self.imageTagMutability = imageTagMutability + self.lifecyclePolicy = lifecyclePolicy + self.prefix = prefix + self.repositoryPolicy = repositoryPolicy + self.resourceTags = resourceTags + } + + public func validate(name: String) throws { + try self.validate(self.customRoleArn, name: "customRoleArn", parent: name, max: 2048) + try self.validate(self.description, name: "description", parent: name, max: 256) + try self.encryptionConfiguration?.validate(name: "\(name).encryptionConfiguration") + try self.validate(self.lifecyclePolicy, name: "lifecyclePolicy", parent: name, max: 30720) + try self.validate(self.prefix, name: "prefix", parent: name, max: 256) + try self.validate(self.prefix, name: "prefix", parent: name, min: 1) + try self.validate(self.prefix, name: "prefix", parent: name, pattern: "^((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*/?|ROOT)$") + try self.validate(self.repositoryPolicy, name: "repositoryPolicy", parent: name, max: 10240) + } + + private enum CodingKeys: String, CodingKey { + case appliedFor = "appliedFor" + case customRoleArn = "customRoleArn" + case description = "description" + case encryptionConfiguration = "encryptionConfiguration" + case imageTagMutability = "imageTagMutability" + case lifecyclePolicy = "lifecyclePolicy" + case prefix = "prefix" + case repositoryPolicy = "repositoryPolicy" + case resourceTags = "resourceTags" + } + } + + public struct CreateRepositoryCreationTemplateResponse: AWSDecodableShape { + /// The 
registry ID associated with the request. + public let registryId: String? + /// The details of the repository creation template associated with the request. + public let repositoryCreationTemplate: RepositoryCreationTemplate? + + public init(registryId: String? = nil, repositoryCreationTemplate: RepositoryCreationTemplate? = nil) { + self.registryId = registryId + self.repositoryCreationTemplate = repositoryCreationTemplate + } + + private enum CodingKeys: String, CodingKey { + case registryId = "registryId" + case repositoryCreationTemplate = "repositoryCreationTemplate" + } + } + public struct CreateRepositoryRequest: AWSEncodableShape { /// The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest. public let encryptionConfiguration: EncryptionConfiguration? @@ -812,6 +891,42 @@ extension ECR { } } + public struct DeleteRepositoryCreationTemplateRequest: AWSEncodableShape { + /// The repository namespace prefix associated with the repository creation template. + public let prefix: String + + public init(prefix: String) { + self.prefix = prefix + } + + public func validate(name: String) throws { + try self.validate(self.prefix, name: "prefix", parent: name, max: 256) + try self.validate(self.prefix, name: "prefix", parent: name, min: 1) + try self.validate(self.prefix, name: "prefix", parent: name, pattern: "^((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*/?|ROOT)$") + } + + private enum CodingKeys: String, CodingKey { + case prefix = "prefix" + } + } + + public struct DeleteRepositoryCreationTemplateResponse: AWSDecodableShape { + /// The registry ID associated with the request. + public let registryId: String? + /// The details of the repository creation template that was deleted. + public let repositoryCreationTemplate: RepositoryCreationTemplate? + + public init(registryId: String? = nil, repositoryCreationTemplate: RepositoryCreationTemplate? 
= nil) { + self.registryId = registryId + self.repositoryCreationTemplate = repositoryCreationTemplate + } + + private enum CodingKeys: String, CodingKey { + case registryId = "registryId" + case repositoryCreationTemplate = "repositoryCreationTemplate" + } + } + public struct DeleteRepositoryPolicyRequest: AWSEncodableShape { /// The Amazon Web Services account ID associated with the registry that contains the repository policy to delete. If you do not specify a registry, the default registry is assumed. public let registryId: String? @@ -1153,7 +1268,7 @@ extension ECR { } public struct DescribeRegistryResponse: AWSDecodableShape { - /// The ID of the registry. + /// The registry ID associated with the request. public let registryId: String? /// The replication configuration for the registry. public let replicationConfiguration: ReplicationConfiguration? @@ -1224,8 +1339,60 @@ extension ECR { } } + public struct DescribeRepositoryCreationTemplatesRequest: AWSEncodableShape { + /// The maximum number of repository results returned by DescribeRepositoryCreationTemplatesRequest in paginated output. When this parameter is used, DescribeRepositoryCreationTemplatesRequest only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeRepositoryCreationTemplatesRequest request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeRepositoryCreationTemplatesRequest returns up to 100 results and a nextToken value, if applicable. + public let maxResults: Int? + /// The nextToken value returned from a previous paginated DescribeRepositoryCreationTemplates request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return. 
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. + public let nextToken: String? + /// The repository namespace prefixes associated with the repository creation templates to describe. If this value is not specified, all repository creation templates are returned. + public let prefixes: [String]? + + public init(maxResults: Int? = nil, nextToken: String? = nil, prefixes: [String]? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.prefixes = prefixes + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.prefixes?.forEach { + try validate($0, name: "prefixes[]", parent: name, max: 256) + try validate($0, name: "prefixes[]", parent: name, min: 1) + try validate($0, name: "prefixes[]", parent: name, pattern: "^((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*/?|ROOT)$") + } + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "maxResults" + case nextToken = "nextToken" + case prefixes = "prefixes" + } + } + + public struct DescribeRepositoryCreationTemplatesResponse: AWSDecodableShape { + /// The nextToken value to include in a future DescribeRepositoryCreationTemplates request. When the results of a DescribeRepositoryCreationTemplates request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return. + public let nextToken: String? + /// The registry ID associated with the request. + public let registryId: String? + /// The details of the repository creation templates. + public let repositoryCreationTemplates: [RepositoryCreationTemplate]? + + public init(nextToken: String? = nil, registryId: String? = nil, repositoryCreationTemplates: [RepositoryCreationTemplate]? 
= nil) { + self.nextToken = nextToken + self.registryId = registryId + self.repositoryCreationTemplates = repositoryCreationTemplates + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case registryId = "registryId" + case repositoryCreationTemplates = "repositoryCreationTemplates" + } + } + public struct EncryptionConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The encryption type to use. If you use the KMS encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created. For more information, see Protecting data using server-side encryption with an KMS key stored in Key Management Service (SSE-KMS) in the Amazon Simple Storage Service Console Developer Guide. If you use the AES256 encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the Amazon Simple Storage Service Console Developer Guide. + /// The encryption type to use. If you use the KMS encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created. For more information, see Protecting data using server-side encryption with an KMS key stored in Key Management Service (SSE-KMS) in the Amazon Simple Storage Service Console Developer Guide. 
If you use the AES256 encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the Amazon Simple Storage Service Console Developer Guide. public let encryptionType: EncryptionType /// If you use the KMS encryption type, specify the KMS key to use for encryption. The alias, key ID, or full ARN of the KMS key can be specified. The key must exist in the same Region as the repository. If no key is specified, the default Amazon Web Services managed KMS key for Amazon ECR will be used. public let kmsKey: String? @@ -1246,6 +1413,28 @@ extension ECR { } } + public struct EncryptionConfigurationForRepositoryCreationTemplate: AWSEncodableShape & AWSDecodableShape { + /// The encryption type to use. If you use the KMS encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created. For more information, see Protecting data using server-side encryption with an KMS key stored in Key Management Service (SSE-KMS) in the Amazon Simple Storage Service Console Developer Guide. If you use the AES256 encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the Amazon Simple Storage Service Console Developer Guide. + public let encryptionType: EncryptionType + /// If you use the KMS encryption type, specify the KMS key to use for encryption. 
The full ARN of the KMS key must be specified. The key must exist in the same Region as the repository. If no key is specified, the default Amazon Web Services managed KMS key for Amazon ECR will be used. + public let kmsKey: String? + + public init(encryptionType: EncryptionType, kmsKey: String? = nil) { + self.encryptionType = encryptionType + self.kmsKey = kmsKey + } + + public func validate(name: String) throws { + try self.validate(self.kmsKey, name: "kmsKey", parent: name, max: 2048) + try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^$|arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key\\/[a-z0-9-]+$") + } + + private enum CodingKeys: String, CodingKey { + case encryptionType = "encryptionType" + case kmsKey = "kmsKey" + } + } + public struct EnhancedImageScanFinding: AWSDecodableShape { /// The Amazon Web Services account ID associated with the image. public let awsAccountId: String? @@ -1315,6 +1504,41 @@ extension ECR { } } + public struct GetAccountSettingRequest: AWSEncodableShape { + /// Basic scan type version name. + public let name: String + + public init(name: String) { + self.name = name + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + } + } + + public struct GetAccountSettingResponse: AWSDecodableShape { + /// Retrieves the basic scan type version name. + public let name: String? + /// Retrieves the value that specifies what basic scan type is being used: AWS_NATIVE or CLAIR. + public let value: String? + + public init(name: String? = nil, value: String? 
= nil) { + self.name = name + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case value = "value" + } + } + public struct GetAuthorizationTokenRequest: AWSEncodableShape { /// A list of Amazon Web Services account IDs that are associated with the registries for which to get AuthorizationData objects. If you do not specify a registry, the default registry is assumed. public let registryIds: [String]? @@ -1540,7 +1764,7 @@ extension ECR { public struct GetRegistryPolicyResponse: AWSDecodableShape { /// The JSON text of the permissions policy for a registry. public let policyText: String? - /// The ID of the registry. + /// The registry ID associated with the request. public let registryId: String? public init(policyText: String? = nil, registryId: String? = nil) { @@ -1559,7 +1783,7 @@ extension ECR { } public struct GetRegistryScanningConfigurationResponse: AWSDecodableShape { - /// The ID of the registry. + /// The registry ID associated with the request. public let registryId: String? /// The scanning configuration for the registry. public let scanningConfiguration: RegistryScanningConfiguration? @@ -2214,6 +2438,45 @@ extension ECR { } } + public struct PutAccountSettingRequest: AWSEncodableShape { + /// Basic scan type version name. + public let name: String + /// Setting value that determines what basic scan type is being used: AWS_NATIVE or CLAIR. + public let value: String + + public init(name: String, value: String) { + self.name = name + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case value = "value" + } + } + + public struct PutAccountSettingResponse: AWSDecodableShape { + /// Retrieves the basic scan type version name. + public let name: String? 
+ /// Retrieves the basic scan type value, either AWS_NATIVE or -. + public let value: String? + + public init(name: String? = nil, value: String? = nil) { + self.name = name + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case value = "value" + } + } + public struct PutImageRequest: AWSEncodableShape { /// The image digest of the image manifest corresponding to the image. public let imageDigest: String? @@ -2440,7 +2703,7 @@ extension ECR { public struct PutRegistryPolicyResponse: AWSDecodableShape { /// The JSON policy text for your registry. public let policyText: String? - /// The registry ID. + /// The registry ID associated with the request. public let registryId: String? public init(policyText: String? = nil, registryId: String? = nil) { @@ -2705,6 +2968,59 @@ extension ECR { } } + public struct RepositoryCreationTemplate: AWSDecodableShape { + /// A list of enumerable Strings representing the repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION + public let appliedFor: [RCTAppliedFor]? + /// The date and time, in JavaScript date format, when the repository creation template was created. + public let createdAt: Date? + /// The ARN of the role to be assumed by Amazon ECR. Amazon ECR will assume your supplied role when the customRoleArn is specified. When this field isn't specified, Amazon ECR will use the service-linked role for the repository creation template. + public let customRoleArn: String? + /// The description associated with the repository creation template. + public let description: String? + /// The encryption configuration associated with the repository creation template. + public let encryptionConfiguration: EncryptionConfigurationForRepositoryCreationTemplate? + /// The tag mutability setting for the repository. 
If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten. + public let imageTagMutability: ImageTagMutability? + /// The lifecycle policy to use for repositories created using the template. + public let lifecyclePolicy: String? + /// The repository namespace prefix associated with the repository creation template. + public let prefix: String? + /// The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions. + public let repositoryPolicy: String? + /// The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters. + public let resourceTags: [Tag]? + /// The date and time, in JavaScript date format, when the repository creation template was last updated. + public let updatedAt: Date? + + public init(appliedFor: [RCTAppliedFor]? = nil, createdAt: Date? = nil, customRoleArn: String? = nil, description: String? = nil, encryptionConfiguration: EncryptionConfigurationForRepositoryCreationTemplate? = nil, imageTagMutability: ImageTagMutability? = nil, lifecyclePolicy: String? = nil, prefix: String? = nil, repositoryPolicy: String? = nil, resourceTags: [Tag]? = nil, updatedAt: Date? 
= nil) { + self.appliedFor = appliedFor + self.createdAt = createdAt + self.customRoleArn = customRoleArn + self.description = description + self.encryptionConfiguration = encryptionConfiguration + self.imageTagMutability = imageTagMutability + self.lifecyclePolicy = lifecyclePolicy + self.prefix = prefix + self.repositoryPolicy = repositoryPolicy + self.resourceTags = resourceTags + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case appliedFor = "appliedFor" + case createdAt = "createdAt" + case customRoleArn = "customRoleArn" + case description = "description" + case encryptionConfiguration = "encryptionConfiguration" + case imageTagMutability = "imageTagMutability" + case lifecyclePolicy = "lifecyclePolicy" + case prefix = "prefix" + case repositoryPolicy = "repositoryPolicy" + case resourceTags = "resourceTags" + case updatedAt = "updatedAt" + } + } + public struct RepositoryFilter: AWSEncodableShape & AWSDecodableShape { /// The repository filter details. When the PREFIX_MATCH filter type is specified, this value is required and should be the repository name prefix to configure replication for. public let filter: String @@ -3128,6 +3444,78 @@ extension ECR { } } + public struct UpdateRepositoryCreationTemplateRequest: AWSEncodableShape { + /// Updates the list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION + public let appliedFor: [RCTAppliedFor]? + /// The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as the registry that you are configuring. Amazon ECR will assume your supplied role when the customRoleArn is specified. When this field isn't specified, Amazon ECR will use the service-linked role for the repository creation template. + public let customRoleArn: String? + /// A description for the repository creation template. 
+ public let description: String? + public let encryptionConfiguration: EncryptionConfigurationForRepositoryCreationTemplate? + /// Updates the tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten. + public let imageTagMutability: ImageTagMutability? + /// Updates the lifecycle policy associated with the specified repository creation template. + public let lifecyclePolicy: String? + /// The repository namespace prefix that matches an existing repository creation template in the registry. All repositories created using this namespace prefix will have the settings defined in this template applied. For example, a prefix of prod would apply to all repositories beginning with prod/. This includes a repository named prod/team1 as well as a repository named prod/repository1. To apply a template to all repositories in your registry that don't have an associated creation template, you can use ROOT as the prefix. + public let prefix: String + /// Updates the repository policy created using the template. A repository policy is a permissions policy associated with a repository to control access permissions. + public let repositoryPolicy: String? + /// The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters. + public let resourceTags: [Tag]? + + public init(appliedFor: [RCTAppliedFor]? = nil, customRoleArn: String? = nil, description: String? = nil, encryptionConfiguration: EncryptionConfigurationForRepositoryCreationTemplate? = nil, imageTagMutability: ImageTagMutability? = nil, lifecyclePolicy: String? 
= nil, prefix: String, repositoryPolicy: String? = nil, resourceTags: [Tag]? = nil) { + self.appliedFor = appliedFor + self.customRoleArn = customRoleArn + self.description = description + self.encryptionConfiguration = encryptionConfiguration + self.imageTagMutability = imageTagMutability + self.lifecyclePolicy = lifecyclePolicy + self.prefix = prefix + self.repositoryPolicy = repositoryPolicy + self.resourceTags = resourceTags + } + + public func validate(name: String) throws { + try self.validate(self.customRoleArn, name: "customRoleArn", parent: name, max: 2048) + try self.validate(self.description, name: "description", parent: name, max: 256) + try self.encryptionConfiguration?.validate(name: "\(name).encryptionConfiguration") + try self.validate(self.lifecyclePolicy, name: "lifecyclePolicy", parent: name, max: 30720) + try self.validate(self.prefix, name: "prefix", parent: name, max: 256) + try self.validate(self.prefix, name: "prefix", parent: name, min: 1) + try self.validate(self.prefix, name: "prefix", parent: name, pattern: "^((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*/?|ROOT)$") + try self.validate(self.repositoryPolicy, name: "repositoryPolicy", parent: name, max: 10240) + } + + private enum CodingKeys: String, CodingKey { + case appliedFor = "appliedFor" + case customRoleArn = "customRoleArn" + case description = "description" + case encryptionConfiguration = "encryptionConfiguration" + case imageTagMutability = "imageTagMutability" + case lifecyclePolicy = "lifecyclePolicy" + case prefix = "prefix" + case repositoryPolicy = "repositoryPolicy" + case resourceTags = "resourceTags" + } + } + + public struct UpdateRepositoryCreationTemplateResponse: AWSDecodableShape { + /// The registry ID associated with the request. + public let registryId: String? + /// The details of the repository creation template associated with the request. + public let repositoryCreationTemplate: RepositoryCreationTemplate? 
+ + public init(registryId: String? = nil, repositoryCreationTemplate: RepositoryCreationTemplate? = nil) { + self.registryId = registryId + self.repositoryCreationTemplate = repositoryCreationTemplate + } + + private enum CodingKeys: String, CodingKey { + case registryId = "registryId" + case repositoryCreationTemplate = "repositoryCreationTemplate" + } + } + public struct UploadLayerPartRequest: AWSEncodableShape { /// The base64-encoded layer part payload. public let layerPartBlob: AWSBase64Data @@ -3330,6 +3718,8 @@ public struct ECRErrorType: AWSErrorType { case scanNotFoundException = "ScanNotFoundException" case secretNotFoundException = "SecretNotFoundException" case serverException = "ServerException" + case templateAlreadyExistsException = "TemplateAlreadyExistsException" + case templateNotFoundException = "TemplateNotFoundException" case tooManyTagsException = "TooManyTagsException" case unableToAccessSecretException = "UnableToAccessSecretException" case unableToDecryptSecretValueException = "UnableToDecryptSecretValueException" @@ -3417,6 +3807,10 @@ public struct ECRErrorType: AWSErrorType { public static var secretNotFoundException: Self { .init(.secretNotFoundException) } /// These errors are usually caused by a server-side issue. public static var serverException: Self { .init(.serverException) } + /// The repository creation template already exists. Specify a unique prefix and try again. + public static var templateAlreadyExistsException: Self { .init(.templateAlreadyExistsException) } + /// The specified repository creation template can't be found. Verify the registry ID and prefix and try again. + public static var templateNotFoundException: Self { .init(.templateNotFoundException) } /// The list of tags on the repository is over the limit. The maximum number of tags that can be applied to a repository is 50. public static var tooManyTagsException: Self { .init(.tooManyTagsException) } /// The secret is unable to be accessed. 
Verify the resource permissions for the secret and try again. diff --git a/Sources/Soto/Services/ECS/ECS_api.swift b/Sources/Soto/Services/ECS/ECS_api.swift index 4a7b85fc39..b5a8b4ad62 100644 --- a/Sources/Soto/Services/ECS/ECS_api.swift +++ b/Sources/Soto/Services/ECS/ECS_api.swift @@ -191,8 +191,8 @@ public struct ECS: AWSService { /// percent and maximum percent values aren't used. This is the case even if they're /// currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you /// can specify only parameters that aren't controlled at the task set level. The only - /// required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For information - /// about task placement and task placement strategies, see Amazon ECS + /// required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For + /// information about task placement and task placement strategies, see Amazon ECS /// task placement in the Amazon Elastic Container Service Developer Guide Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. 
However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. @Sendable public func createService(_ input: CreateServiceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateServiceResponse { @@ -209,7 +209,7 @@ public struct ECS: AWSService { /// Create a task set in the specified cluster and service. This is used when a service /// uses the EXTERNAL deployment controller type. For more information, see /// Amazon ECS deployment - /// types in the Amazon Elastic Container Service Developer Guide. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For information about the maximum number of task sets and otther quotas, see Amazon ECS + /// types in the Amazon Elastic Container Service Developer Guide. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For information about the maximum number of task sets and other quotas, see Amazon ECS /// service quotas in the Amazon Elastic Container Service Developer Guide. @Sendable public func createTaskSet(_ input: CreateTaskSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTaskSetResponse { @@ -794,9 +794,7 @@ public struct ECS: AWSService { /// CLI or SDKs to make API requests to the Amazon Web Services services that are specified in the /// policy that's associated with the role. For more information, see IAM /// Roles for Tasks in the Amazon Elastic Container Service Developer Guide. You can specify a Docker networking mode for the containers in your task definition - /// with the networkMode parameter. 
The available network modes correspond to - /// those described in Network - /// settings in the Docker run reference. If you specify the awsvpc + /// with the networkMode parameter. If you specify the awsvpc /// network mode, the task is allocated an elastic network interface, and you must specify a /// NetworkConfiguration when you create a service or run a task with /// the task definition. For more information, see Task Networking @@ -863,8 +861,8 @@ public struct ECS: AWSService { /// SIGTERM value and a default 30-second timeout, after which the /// SIGKILL value is sent and the containers are forcibly stopped. If the /// container handles the SIGTERM value gracefully and exits within 30 seconds - /// from receiving it, no SIGKILL value is sent. For Windows containers, POSIX signals do not work and runtime stops the container by sending - /// a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown + /// from receiving it, no SIGKILL value is sent. For Windows containers, POSIX signals do not work and runtime stops the container by + /// sending a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown /// of (Windows) container #25982 on GitHub. The default 30-second timeout can be configured on the Amazon ECS container agent with /// the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see /// Amazon ECS Container Agent Configuration in the diff --git a/Sources/Soto/Services/ECS/ECS_shapes.swift b/Sources/Soto/Services/ECS/ECS_shapes.swift index 2caef261ee..132c6c0edc 100644 --- a/Sources/Soto/Services/ECS/ECS_shapes.swift +++ b/Sources/Soto/Services/ECS/ECS_shapes.swift @@ -989,14 +989,13 @@ extension ECS { public struct ContainerDefinition: AWSEncodableShape & AWSDecodableShape { /// The command that's passed to the container. 
This parameter maps to Cmd in - /// the Create a container section of the Docker Remote API and the + /// the docker create-container command and the /// COMMAND parameter to docker - /// run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each + /// run. If there are multiple arguments, each /// argument is a separated string in the array. public let command: [String]? /// The number of cpu units reserved for the container. This parameter maps - /// to CpuShares in the Create a container section of the - /// Docker Remote API and the --cpu-shares option to docker run. This field is optional for tasks using the Fargate launch type, and the + /// to CpuShares in the docker create-container commandand the --cpu-shares option to docker run. This field is optional for tasks using the Fargate launch type, and the /// only requirement is that the total amount of CPU reserved for all containers within a /// task be lower than the task-level cpu value. You can determine the number of CPU units that are available per EC2 instance type /// by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page @@ -1009,17 +1008,18 @@ extension ECS { /// guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float /// to higher CPU usage if the other container was not using it. If both tasks were 100% /// active all of the time, they would be limited to 512 CPU units. On Linux container instances, the Docker daemon on the container instance uses the CPU - /// value to calculate the relative CPU share ratios for running containers. For more - /// information, see CPU share - /// constraint in the Docker documentation. The minimum valid CPU share value - /// that the Linux kernel allows is 2. However, the CPU parameter isn't required, and you - /// can use CPU values below 2 in your container definitions. 
For CPU values below 2 - /// (including null), the behavior varies based on your Amazon ECS container agent + /// value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value + /// that the Linux kernel allows is 2, and the + /// maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you + /// can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 + /// (including null) or above 262144, the behavior varies based on your Amazon ECS container agent /// version: Agent versions less than or equal to 1.1.0: /// Null and zero CPU values are passed to Docker as 0, which Docker then converts /// to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux /// kernel converts to two CPU shares. Agent versions greater than or equal to 1.2.0: - /// Null, zero, and CPU values of 1 are passed to Docker as 2. On Windows container instances, the CPU limit is enforced as an absolute limit, or a + /// Null, zero, and CPU values of 1 are passed to Docker as 2. Agent versions greater than or equal to + /// 1.84.0: CPU values greater than 256 vCPU are passed to Docker as + /// 256, which is equivalent to 262144 CPU shares. On Windows container instances, the CPU limit is enforced as an absolute limit, or a /// quota. Windows containers only have access to the specified amount of CPU that's /// described in the task definition. A null or zero CPU value is passed to Docker as /// 0, which Windows interprets as 1% of one CPU. @@ -1058,59 +1058,47 @@ extension ECS { /// the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. public let dependsOn: [ContainerDependency]? /// When this parameter is true, networking is off within the container. This parameter - /// maps to NetworkDisabled in the Create a container section - /// of the Docker Remote API. 
This parameter is not supported for Windows containers. + /// maps to NetworkDisabled in the docker create-container command. This parameter is not supported for Windows containers. public let disableNetworking: Bool? /// A list of DNS search domains that are presented to the container. This parameter maps - /// to DnsSearch in the Create a container section of the - /// Docker Remote API and the --dns-search option to docker run. This parameter is not supported for Windows containers. + /// to DnsSearch in the docker create-container command and the --dns-search option to docker run. This parameter is not supported for Windows containers. public let dnsSearchDomains: [String]? /// A list of DNS servers that are presented to the container. This parameter maps to - /// Dns in the Create a container section of the - /// Docker Remote API and the --dns option to docker run. This parameter is not supported for Windows containers. + /// Dns in the the docker create-container command and the --dns option to docker run. This parameter is not supported for Windows containers. public let dnsServers: [String]? /// A key/value map of labels to add to the container. This parameter maps to - /// Labels in the Create a container section of the - /// Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' + /// Labels in the docker create-container command and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' public let dockerLabels: [String: String]? - /// A list of strings to provide custom configuration for multiple security systems. For - /// more information about valid values, see Docker - /// Run Security Configuration. This field isn't valid for containers in tasks + /// A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks /// using the Fargate launch type. For Linux tasks on EC2, this parameter can be used to reference custom /// labels for SELinux and AppArmor multi-level security systems. For any tasks on EC2, this parameter can be used to reference a /// credential spec file that configures a container for Active Directory authentication. /// For more information, see Using gMSAs for Windows /// Containers and Using gMSAs for Linux - /// Containers in the Amazon Elastic Container Service Developer Guide. This parameter maps to SecurityOpt in the - /// Create a container section of the Docker Remote API and the + /// Containers in the Amazon Elastic Container Service Developer Guide. This parameter maps to SecurityOpt in the docker create-container command and the /// --security-opt option to docker /// run. The Amazon ECS container agent running on a container instance must register with the /// ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true /// environment variables before containers placed on that instance can use these /// security options. For more information, see Amazon ECS Container - /// Agent Configuration in the Amazon Elastic Container Service Developer Guide. For more information about valid values, see Docker - /// Run Security Configuration. 
Valid values: "no-new-privileges" | "apparmor:PROFILE" | "label:value" | + /// Agent Configuration in the Amazon Elastic Container Service Developer Guide. Valid values: "no-new-privileges" | "apparmor:PROFILE" | "label:value" | /// "credentialspec:CredentialSpecFilePath" public let dockerSecurityOptions: [String]? /// Early versions of the Amazon ECS container agent don't properly handle /// entryPoint parameters. If you have problems using /// entryPoint, update your container agent or enter your commands and /// arguments as command array items instead. The entry point that's passed to the container. This parameter maps to - /// Entrypoint in the Create a container section of the - /// Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint. + /// Entrypoint in tthe docker create-container command and the --entrypoint option to docker run. public let entryPoint: [String]? /// The environment variables to pass to a container. This parameter maps to - /// Env in the Create a container section of the - /// Docker Remote API and the --env option to docker run. We don't recommend that you use plaintext environment variables for sensitive + /// Env in the docker create-container command and the --env option to docker run. We don't recommend that you use plaintext environment variables for sensitive /// information, such as credential data. public let environment: [KeyValuePair]? /// A list of files containing the environment variables to pass to a container. This /// parameter maps to the --env-file option to docker run. You can specify up to ten environment files. The file must have a .env /// file extension. Each line in an environment file contains an environment variable in /// VARIABLE=VALUE format. Lines beginning with # are treated - /// as comments and are ignored. 
For more information about the environment variable file - /// syntax, see Declare default - /// environment variables in file. If there are environment variables specified using the environment + /// as comments and are ignored. If there are environment variables specified using the environment /// parameter in a container definition, they take precedence over the variables contained /// within an environment file. If multiple environment files are specified that contain the /// same variable, they're processed from the top down. We recommend that you use unique @@ -1128,8 +1116,7 @@ extension ECS { /// Architecture in the Amazon Elastic Container Service Developer Guide. public let essential: Bool? /// A list of hostnames and IP address mappings to append to the /etc/hosts - /// file on the container. This parameter maps to ExtraHosts in the - /// Create a container section of the Docker Remote API and the + /// file on the container. This parameter maps to ExtraHosts in the docker create-container command and the /// --add-host option to docker /// run. This parameter isn't supported for Windows containers or tasks that use the /// awsvpc network mode. @@ -1139,21 +1126,19 @@ extension ECS { /// in the Amazon Elastic Container Service Developer Guide. public let firelensConfiguration: FirelensConfiguration? /// The container health check command and associated configuration parameters for the - /// container. This parameter maps to HealthCheck in the - /// Create a container section of the Docker Remote API and the + /// container. This parameter maps to HealthCheck in the docker create-container command and the /// HEALTHCHECK parameter of docker /// run. public let healthCheck: HealthCheck? /// The hostname to use for your container. This parameter maps to Hostname - /// in the Create a container section of the Docker Remote API and the + /// in thethe docker create-container command and the /// --hostname option to docker /// run. 
The hostname parameter is not supported if you're using the /// awsvpc network mode. public let hostname: String? /// The image used to start a container. This string is passed directly to the Docker /// daemon. By default, images in the Docker Hub registry are available. Other repositories - /// are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the - /// Create a container section of the Docker Remote API and the + /// are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker create-container command and the /// IMAGE parameter of docker /// run. When a new task starts, the Amazon ECS container agent pulls the latest version of /// the specified image and tag for the container to use. However, subsequent @@ -1170,17 +1155,13 @@ extension ECS { public let image: String? /// When this parameter is true, you can deploy containerized applications /// that require stdin or a tty to be allocated. This parameter - /// maps to OpenStdin in the Create a container section of the - /// Docker Remote API and the --interactive option to docker run. + /// maps to OpenStdin in the docker create-container command and the --interactive option to docker run. public let interactive: Bool? /// The links parameter allows containers to communicate with each other /// without the need for port mappings. This parameter is only supported if the network mode /// of a task definition is bridge. The name:internalName /// construct is analogous to name:alias in Docker links. - /// Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. 
For more information about linking Docker containers, go to - /// Legacy container links - /// in the Docker documentation. This parameter maps to Links in the - /// Create a container section of the Docker Remote API and the + /// Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to Links in the docker create-container command and the /// --link option to docker /// run. This parameter is not supported for Windows containers. Containers that are collocated on a single container instance may be able to /// communicate with each other without requiring links or host port mappings. Network @@ -1190,17 +1171,14 @@ extension ECS { /// Linux-specific modifications that are applied to the container, such as Linux kernel /// capabilities. For more information see KernelCapabilities. This parameter is not supported for Windows containers. public let linuxParameters: LinuxParameters? - /// The log configuration specification for the container. This parameter maps to LogConfig in the - /// Create a container section of the Docker Remote API and the + /// The log configuration specification for the container. This parameter maps to LogConfig in the docker create-container command and the /// --log-driver option to docker /// run. By default, containers use the same logging driver that the Docker /// daemon uses. However the container can use a different logging driver than the Docker /// daemon by specifying a log driver with this parameter in the container definition. To /// use a different logging driver for a container, the log system must be configured /// properly on the container instance (or on a different log server for remote logging - /// options). For more information about the options for different supported log drivers, - /// see Configure - /// logging drivers in the Docker documentation. Amazon ECS currently supports a subset of the logging drivers available to the Docker + /// options). 
Amazon ECS currently supports a subset of the logging drivers available to the Docker /// daemon (shown in the LogConfiguration data type). Additional log /// drivers may be available in future releases of the Amazon ECS container agent. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' The Amazon ECS container agent running on a container instance must register the /// logging drivers available on that instance with the @@ -1213,8 +1191,7 @@ extension ECS { /// to exceed the memory specified here, the container is killed. The total amount of memory /// reserved for all containers within a task must be lower than the task /// memory value, if one is specified. This parameter maps to - /// Memory in the Create a container section of the - /// Docker Remote API and the --memory option to docker run. If using the Fargate launch type, this parameter is optional. If using the EC2 launch type, you must specify either a task-level + /// Memory in thethe docker create-container command and the --memory option to docker run. If using the Fargate launch type, this parameter is optional. If using the EC2 launch type, you must specify either a task-level /// memory value or a container-level memory value. If you specify both a container-level /// memory and memoryReservation value, memory /// must be greater than memoryReservation. If you specify @@ -1229,8 +1206,7 @@ extension ECS { /// However, your container can consume more memory when it needs to, up to either the hard /// limit specified with the memory parameter (if applicable), or all of the /// available memory on the container instance, whichever comes first. 
This parameter maps - /// to MemoryReservation in the Create a container section of - /// the Docker Remote API and the --memory-reservation option to docker run. If a task-level memory value is not specified, you must specify a non-zero integer for + /// to MemoryReservation in the the docker create-container command and the --memory-reservation option to docker run. If a task-level memory value is not specified, you must specify a non-zero integer for /// one or both of memory or memoryReservation in a container /// definition. If you specify both, memory must be greater than /// memoryReservation. If you specify memoryReservation, then @@ -1245,16 +1221,14 @@ extension ECS { /// container. So, don't specify less than 6 MiB of memory for your containers. The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a /// container. So, don't specify less than 4 MiB of memory for your containers. public let memoryReservation: Int? - /// The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container - /// section of the Docker Remote API and the --volume option to docker run. Windows containers can mount whole directories on the same drive as + /// The mount points for data volumes in your container. This parameter maps to Volumes in the the docker create-container command and the --volume option to docker run. Windows containers can mount whole directories on the same drive as /// $env:ProgramData. Windows containers can't mount directories on a /// different drive, and mount point can't be across drives. public let mountPoints: [MountPoint]? /// The name of a container. If you're linking multiple containers together in a task /// definition, the name of one container can be entered in the /// links of another container to connect the containers. - /// Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. 
This parameter maps to name in the - /// Create a container section of the Docker Remote API and the + /// Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in tthe docker create-container command and the /// --name option to docker /// run. public let name: String? @@ -1264,7 +1238,7 @@ extension ECS { /// be the same value as the containerPort. Port mappings on Windows use the NetNAT gateway address rather than /// localhost. There's no loopback for port mappings on Windows, so you /// can't access a container's mapped port from the host itself. This parameter maps to PortBindings in the - /// Create a container section of the Docker Remote API and the + /// the docker create-container command and the /// --publish option to docker /// run. If the network mode of a task definition is set to none, /// then you can't specify port mappings. If the network mode of a task definition is set to @@ -1278,16 +1252,13 @@ extension ECS { public let portMappings: [PortMapping]? /// When this parameter is true, the container is given elevated privileges on the host /// container instance (similar to the root user). This parameter maps to - /// Privileged in the Create a container section of the - /// Docker Remote API and the --privileged option to docker run. This parameter is not supported for Windows containers or tasks run on Fargate. + /// Privileged in the the docker create-container command and the --privileged option to docker run This parameter is not supported for Windows containers or tasks run on Fargate. public let privileged: Bool? /// When this parameter is true, a TTY is allocated. This parameter maps to - /// Tty in the Create a container section of the - /// Docker Remote API and the --tty option to docker run. + /// Tty in tthe docker create-container command and the --tty option to docker run. public let pseudoTerminal: Bool? 
/// When this parameter is true, the container is given read-only access to its root file - /// system. This parameter maps to ReadonlyRootfs in the - /// Create a container section of the Docker Remote API and the + /// system. This parameter maps to ReadonlyRootfs in the docker create-container command and the /// --read-only option to docker /// run. This parameter is not supported for Windows containers. public let readonlyRootFilesystem: Bool? @@ -1296,6 +1267,9 @@ extension ECS { /// The type and amount of a resource to assign to a container. The only supported /// resource is a GPU. public let resourceRequirements: [ResourceRequirement]? + /// The restart policy for a container. When you set up a restart policy, Amazon ECS can restart the container without needing to replace the + /// task. For more information, see Restart individual containers in Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide. + public let restartPolicy: ContainerRestartPolicy? /// The secrets to pass to the container. For more information, see Specifying /// Sensitive Data in the Amazon Elastic Container Service Developer Guide. public let secrets: [Secret]? @@ -1317,7 +1291,7 @@ extension ECS { /// package. If your container instances are launched from version 20190301 or /// later, then they contain the required versions of the container agent and /// ecs-init. For more information, see Amazon ECS-optimized Linux AMI - /// in the Amazon Elastic Container Service Developer Guide. The valid values are 2-120 seconds. + /// in the Amazon Elastic Container Service Developer Guide. The valid values for Fargate are 2-120 seconds. public let startTimeout: Int? /// Time duration (in seconds) to wait before the container is forcefully killed if it /// doesn't exit normally on its own. For tasks using the Fargate launch type, the task or service requires @@ -1337,25 +1311,22 @@ extension ECS { /// container agent and ecs-init. 
For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. The valid values are 2-120 seconds. public let stopTimeout: Int? /// A list of namespaced kernel parameters to set in the container. This parameter maps to - /// Sysctls in the Create a container section of the - /// Docker Remote API and the --sysctl option to docker run. For example, you can configure + /// Sysctls in the docker create-container command and the --sysctl option to docker run. For example, you can configure /// net.ipv4.tcp_keepalive_time setting to maintain longer lived /// connections. public let systemControls: [SystemControl]? /// A list of ulimits to set in the container. If a ulimit value /// is specified in a task definition, it overrides the default values set by Docker. This - /// parameter maps to Ulimits in the Create a container section - /// of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed + /// parameter maps to Ulimits in the docker create-container command and the --ulimit option to docker run. Valid naming values are displayed /// in the Ulimit data type. Amazon ECS tasks hosted on Fargate use the default /// resource limit values set by the operating system with the exception of /// the nofile resource limit parameter which Fargate /// overrides. The nofile resource limit sets a restriction on /// the number of open files that a container can use. The default - /// nofile soft limit is 1024 and the default hard limit + /// nofile soft limit is 65535 and the default hard limit /// is 65535. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' This parameter is not supported for Windows containers. public let ulimits: [Ulimit]? 
- /// The user to use inside the container. This parameter maps to User in the - /// Create a container section of the Docker Remote API and the + /// The user to use inside the container. This parameter maps to User in the docker create-container command and the /// --user option to docker /// run. When running tasks using the host network mode, don't run containers /// using the root user (UID 0). We recommend using a non-root user for better @@ -1363,15 +1334,13 @@ extension ECS { /// or GID, you must specify it as a positive integer. user user:group uid uid:gid user:gid uid:group This parameter is not supported for Windows containers. public let user: String? /// Data volumes to mount from another container. This parameter maps to - /// VolumesFrom in the Create a container section of the - /// Docker Remote API and the --volumes-from option to docker run. + /// VolumesFrom in the docker create-container command and the --volumes-from option to docker run. public let volumesFrom: [VolumeFrom]? /// The working directory to run commands inside the container in. This parameter maps to - /// WorkingDir in the Create a container section of the - /// Docker Remote API and the --workdir option to docker run. + /// WorkingDir in the docker create-container command and the --workdir option to docker run. public let workingDirectory: String? - public init(command: [String]? = nil, cpu: Int? = nil, credentialSpecs: [String]? = nil, dependsOn: [ContainerDependency]? = nil, disableNetworking: Bool? = nil, dnsSearchDomains: [String]? = nil, dnsServers: [String]? = nil, dockerLabels: [String: String]? = nil, dockerSecurityOptions: [String]? = nil, entryPoint: [String]? = nil, environment: [KeyValuePair]? = nil, environmentFiles: [EnvironmentFile]? = nil, essential: Bool? = nil, extraHosts: [HostEntry]? = nil, firelensConfiguration: FirelensConfiguration? = nil, healthCheck: HealthCheck? = nil, hostname: String? = nil, image: String? = nil, interactive: Bool? 
= nil, links: [String]? = nil, linuxParameters: LinuxParameters? = nil, logConfiguration: LogConfiguration? = nil, memory: Int? = nil, memoryReservation: Int? = nil, mountPoints: [MountPoint]? = nil, name: String? = nil, portMappings: [PortMapping]? = nil, privileged: Bool? = nil, pseudoTerminal: Bool? = nil, readonlyRootFilesystem: Bool? = nil, repositoryCredentials: RepositoryCredentials? = nil, resourceRequirements: [ResourceRequirement]? = nil, secrets: [Secret]? = nil, startTimeout: Int? = nil, stopTimeout: Int? = nil, systemControls: [SystemControl]? = nil, ulimits: [Ulimit]? = nil, user: String? = nil, volumesFrom: [VolumeFrom]? = nil, workingDirectory: String? = nil) { + public init(command: [String]? = nil, cpu: Int? = nil, credentialSpecs: [String]? = nil, dependsOn: [ContainerDependency]? = nil, disableNetworking: Bool? = nil, dnsSearchDomains: [String]? = nil, dnsServers: [String]? = nil, dockerLabels: [String: String]? = nil, dockerSecurityOptions: [String]? = nil, entryPoint: [String]? = nil, environment: [KeyValuePair]? = nil, environmentFiles: [EnvironmentFile]? = nil, essential: Bool? = nil, extraHosts: [HostEntry]? = nil, firelensConfiguration: FirelensConfiguration? = nil, healthCheck: HealthCheck? = nil, hostname: String? = nil, image: String? = nil, interactive: Bool? = nil, links: [String]? = nil, linuxParameters: LinuxParameters? = nil, logConfiguration: LogConfiguration? = nil, memory: Int? = nil, memoryReservation: Int? = nil, mountPoints: [MountPoint]? = nil, name: String? = nil, portMappings: [PortMapping]? = nil, privileged: Bool? = nil, pseudoTerminal: Bool? = nil, readonlyRootFilesystem: Bool? = nil, repositoryCredentials: RepositoryCredentials? = nil, resourceRequirements: [ResourceRequirement]? = nil, restartPolicy: ContainerRestartPolicy? = nil, secrets: [Secret]? = nil, startTimeout: Int? = nil, stopTimeout: Int? = nil, systemControls: [SystemControl]? = nil, ulimits: [Ulimit]? = nil, user: String? = nil, volumesFrom: [VolumeFrom]? 
= nil, workingDirectory: String? = nil) { self.command = command self.cpu = cpu self.credentialSpecs = credentialSpecs @@ -1404,6 +1373,7 @@ extension ECS { self.readonlyRootFilesystem = readonlyRootFilesystem self.repositoryCredentials = repositoryCredentials self.resourceRequirements = resourceRequirements + self.restartPolicy = restartPolicy self.secrets = secrets self.startTimeout = startTimeout self.stopTimeout = stopTimeout @@ -1447,6 +1417,7 @@ extension ECS { case readonlyRootFilesystem = "readonlyRootFilesystem" case repositoryCredentials = "repositoryCredentials" case resourceRequirements = "resourceRequirements" + case restartPolicy = "restartPolicy" case secrets = "secrets" case startTimeout = "startTimeout" case stopTimeout = "stopTimeout" @@ -1681,6 +1652,33 @@ extension ECS { } } + public struct ContainerRestartPolicy: AWSEncodableShape & AWSDecodableShape { + /// Specifies whether a restart policy is enabled for the + /// container. + public let enabled: Bool + /// A list of exit codes that Amazon ECS will ignore and not attempt a restart on. You can specify a maximum of 50 container exit + /// codes. By default, Amazon ECS does not ignore + /// any exit codes. + public let ignoredExitCodes: [Int]? + /// A period of time (in seconds) that the container must run for before a restart can be attempted. A container can be + /// restarted only once every restartAttemptPeriod seconds. If a container isn't able to run for this time period and exits early, it will not be restarted. You can set a minimum + /// restartAttemptPeriod of 60 seconds and a maximum restartAttemptPeriod of 1800 seconds. + /// By default, a container must run for 300 seconds before it can be restarted. + public let restartAttemptPeriod: Int? + + public init(enabled: Bool, ignoredExitCodes: [Int]? = nil, restartAttemptPeriod: Int? 
= nil) { + self.enabled = enabled + self.ignoredExitCodes = ignoredExitCodes + self.restartAttemptPeriod = restartAttemptPeriod + } + + private enum CodingKeys: String, CodingKey { + case enabled = "enabled" + case ignoredExitCodes = "ignoredExitCodes" + case restartAttemptPeriod = "restartAttemptPeriod" + } + } + public struct ContainerStateChange: AWSEncodableShape { /// The name of the container. public let containerName: String? @@ -1894,8 +1892,8 @@ extension ECS { /// defined and you don't specify a health check grace period value, the default value of /// 0 is used. If you do not use an Elastic Load Balancing, we recommend that you use the startPeriod in /// the task definition health check parameters. For more information, see Health - /// check. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can - /// specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). + /// check. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you + /// can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). /// During that time, the Amazon ECS service scheduler ignores health check status. This grace /// period can prevent the service scheduler from marking tasks as unhealthy and stopping /// them before they have time to come up. @@ -1903,8 +1901,8 @@ extension ECS { /// The infrastructure that you run your service on. For more information, see Amazon ECS /// launch types in the Amazon Elastic Container Service Developer Guide. The FARGATE launch type runs your tasks on Fargate On-Demand /// infrastructure. Fargate Spot infrastructure is available for use but a capacity provider - /// strategy must be used. For more information, see Fargate capacity providers in the - /// Amazon ECS Developer Guide. The EC2 launch type runs your tasks on Amazon EC2 instances registered to your + /// strategy must be used. 
For more information, see Fargate capacity providers in the Amazon ECS + /// Developer Guide. The EC2 launch type runs your tasks on Amazon EC2 instances registered to your /// cluster. The EXTERNAL launch type runs your tasks on your on-premises server or /// virtual machine (VM) capacity registered to your cluster. A service can use either a launch type or a capacity provider strategy. If a /// launchType is specified, the capacityProviderStrategy @@ -1956,11 +1954,14 @@ extension ECS { /// The platform version that your tasks in the service are running on. A platform version /// is specified only for tasks using the Fargate launch type. If one isn't /// specified, the LATEST platform version is used. For more information, see - /// Fargate platform versions in the Amazon Elastic Container Service Developer Guide. + /// Fargate platform + /// versions in the Amazon Elastic Container Service Developer Guide. public let platformVersion: String? /// Specifies whether to propagate the tags from the task definition to the task. If no /// value is specified, the tags aren't propagated. Tags can only be propagated to the task - /// during task creation. To add tags to a task after task creation, use the TagResource API action. You must set this to a value other than NONE when you use Cost Explorer. For more information, see Amazon ECS usage reports in the Amazon Elastic Container Service Developer Guide. The default is NONE. + /// during task creation. To add tags to a task after task creation, use the TagResource API action. You must set this to a value other than NONE when you use Cost Explorer. + /// For more information, see Amazon ECS usage reports + /// in the Amazon Elastic Container Service Developer Guide. The default is NONE. public let propagateTags: PropagateTags? /// The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your /// load balancer on your behalf. 
This parameter is only permitted if you are using a load @@ -2687,14 +2688,12 @@ extension ECS { /// total. If a task has an essential container with a health check defined, the service /// scheduler will wait for both the task to reach a healthy status and the load /// balancer target group health check to return a healthy status before counting - /// the task towards the minimum healthy percent total. The default value for a replica service for - /// minimumHealthyPercent is 100%. The default - /// minimumHealthyPercent value for a service using - /// the DAEMON service schedule is 0% for the CLI, - /// the Amazon Web Services SDKs, and the APIs and 50% for the Amazon Web Services Management Console. The minimum number of healthy tasks during a deployment is the - /// desiredCount multiplied by the - /// minimumHealthyPercent/100, rounded up to the - /// nearest integer value. If a service is using either the blue/green (CODE_DEPLOY) or + /// the task towards the minimum healthy percent total. The default value for a replica service for minimumHealthyPercent is + /// 100%. The default minimumHealthyPercent value for a service using the + /// DAEMON service schedule is 0% for the CLI, the Amazon Web Services SDKs, and the + /// APIs and 50% for the Amazon Web Services Management Console. The minimum number of healthy tasks during a deployment is the + /// desiredCount multiplied by the minimumHealthyPercent/100, + /// rounded up to the nearest integer value. If a service is using either the blue/green (CODE_DEPLOY) or /// EXTERNAL deployment types and is running tasks that use the /// EC2 launch type, the minimum healthy /// percent value is set to the default value and is used to define the lower @@ -2743,7 +2742,8 @@ extension ECS { } public struct DeploymentEphemeralStorage: AWSDecodableShape { - /// Specify an Key Management Service key ID to encrypt the ephemeral storage for deployment. 
+ /// Specify an Key Management Service key ID to encrypt the ephemeral storage for + /// deployment. public let kmsKeyId: String? public init(kmsKeyId: String? = nil) { @@ -3222,20 +3222,16 @@ extension ECS { /// by Docker because it is used for task placement. If the driver was installed using the /// Docker plugin CLI, use docker plugin ls to retrieve the driver name from /// your container instance. If the driver was installed using another method, use Docker - /// plugin discovery to retrieve the driver name. For more information, see Docker - /// plugin discovery. This parameter maps to Driver in the - /// Create a volume section of the Docker Remote API and the + /// plugin discovery to retrieve the driver name. This parameter maps to Driver in the docker create-container command and the /// xxdriver option to docker /// volume create. public let driver: String? /// A map of Docker driver-specific options passed through. This parameter maps to - /// DriverOpts in the Create a volume section of the - /// Docker Remote API and the xxopt option to docker + /// DriverOpts in the docker create-volume command and the xxopt option to docker /// volume create. public let driverOpts: [String: String]? /// Custom metadata to add to your Docker volume. This parameter maps to - /// Labels in the Create a volume section of the - /// Docker Remote API and the xxlabel option to docker + /// Labels in the docker create-container command and the xxlabel option to docker /// volume create. public let labels: [String: String]? /// The scope for the Docker volume that determines its lifecycle. Docker volumes that are @@ -3360,8 +3356,8 @@ extension ECS { } public struct EnvironmentFile: AWSEncodableShape & AWSDecodableShape { - /// The file type to use. Environment files are objects in Amazon S3. The only supported value is - /// s3. + /// The file type to use. Environment files are objects in Amazon S3. The only supported value + /// is s3. 
public let type: EnvironmentFileType /// The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment /// variable file. @@ -3379,8 +3375,8 @@ extension ECS { } public struct EphemeralStorage: AWSEncodableShape & AWSDecodableShape { - /// The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported - /// value is 20 GiB and the maximum supported value is + /// The total amount, in GiB, of ephemeral storage to set for the task. The minimum + /// supported value is 20 GiB and the maximum supported value is /// 200 GiB. public let sizeInGiB: Int @@ -3654,8 +3650,7 @@ extension ECS { /// directly, or CMD-SHELL to run the command with the container's default /// shell. When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list /// of commands in double quotes and brackets. [ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ] You don't include the double quotes and brackets when you use the Amazon Web Services Management Console. CMD-SHELL, curl -f http://localhost/ || exit 1 An exit code of 0 indicates success, and non-zero exit code indicates failure. For - /// more information, see HealthCheck in the Create a container - /// section of the Docker Remote API. + /// more information, see HealthCheck in the docker create-container command. public let command: [String] /// The time period in seconds between each health check execution. You may specify /// between 5 and 300 seconds. The default value is 30 seconds. @@ -3793,8 +3788,7 @@ extension ECS { public struct KernelCapabilities: AWSEncodableShape & AWSDecodableShape { /// The Linux capabilities for the container that have been added to the default - /// configuration provided by Docker. This parameter maps to CapAdd in the - /// Create a container section of the Docker Remote API and the + /// configuration provided by Docker. 
This parameter maps to CapAdd in the docker create-container command and the /// --cap-add option to docker /// run. Tasks launched on Fargate only support adding the SYS_PTRACE kernel /// capability. Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | @@ -3807,8 +3801,7 @@ extension ECS { /// "WAKE_ALARM" public let add: [String]? /// The Linux capabilities for the container that have been removed from the default - /// configuration provided by Docker. This parameter maps to CapDrop in the - /// Create a container section of the Docker Remote API and the + /// configuration provided by Docker. This parameter maps to CapDrop in the docker create-container command and the /// --cap-drop option to docker /// run. Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | /// "CHOWN" | "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" | @@ -3858,8 +3851,7 @@ extension ECS { /// later. public let capabilities: KernelCapabilities? /// Any host devices to expose to the container. This parameter maps to - /// Devices in the Create a container section of the - /// Docker Remote API and the --device option to docker run. If you're using tasks that use the Fargate launch type, the + /// Devices in the docker create-container command and the --device option to docker run. If you're using tasks that use the Fargate launch type, the /// devices parameter isn't supported. public let devices: [Device]? /// Run an init process inside the container that forwards signals and reaps @@ -4613,10 +4605,10 @@ extension ECS { /// The log driver to use for the container. For tasks on Fargate, the supported log drivers are awslogs, /// splunk, and awsfirelens. For tasks hosted on Amazon EC2 instances, the supported log drivers are /// awslogs, fluentd, gelf, - /// json-file, journald, - /// logentries,syslog, splunk, and - /// awsfirelens. 
For more information about using the awslogs log driver, see Using - /// the awslogs log driver in the Amazon Elastic Container Service Developer Guide. For more information about using the awsfirelens log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide. If you have a custom driver that isn't listed, you can fork the Amazon ECS container + /// json-file, journald, syslog, + /// splunk, and awsfirelens. For more information about using the awslogs log driver, see Send + /// Amazon ECS logs to CloudWatch in the Amazon Elastic Container Service Developer Guide. For more information about using the awsfirelens log driver, see Send + /// Amazon ECS logs to an Amazon Web Services service or Amazon Web Services Partner. If you have a custom driver that isn't listed, you can fork the Amazon ECS container /// agent project that's available /// on GitHub and customize it to work with that driver. We encourage you to /// submit pull requests for changes that you would like to have included. However, we @@ -5445,7 +5437,7 @@ extension ECS { /// tasks in the Amazon ECS Developer Guide. For tasks using the Fargate launch type, the task requires the /// following platforms: Linux platform version 1.4.0 or later. Windows platform version 1.0.0 or later. public let ephemeralStorage: EphemeralStorage? - /// The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide. + /// The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. 
For information about the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. public let executionRoleArn: String? /// You must specify a family for a task definition. You can use it track /// multiple versions of the same task definition. The family is used as a name @@ -5453,7 +5445,7 @@ extension ECS { public let family: String /// The Elastic Inference accelerators to use for the containers in the task. public let inferenceAccelerators: [InferenceAccelerator]? - /// The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference. If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see Docker security. If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace related systemControls are not supported. For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task. This parameter is not supported for Windows containers or tasks run on Fargate. 
+ /// The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace related systemControls are not supported. For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task. This parameter is not supported for Windows containers or tasks run on Fargate. public let ipcMode: IpcMode? /// The amount of memory (in MiB) used by the task. It can be expressed as an integer /// using MiB (for example ,1024) or as a string using GB (for example, @@ -5465,9 +5457,9 @@ extension ECS { /// cpu parameter. The CPU units cannot be less than 1 vCPU when you use Windows containers on /// Fargate. 
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU) This option requires Linux platform 1.4.0 or later. Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU) This option requires Linux platform 1.4.0 or later. public let memory: String? - /// The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge. For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode. With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings. When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. 
If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide. If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used. For more information, see Network settings in the Docker run reference. + /// The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge. For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode. With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings. When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. 
For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide. If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used. public let networkMode: NetworkMode? - /// The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference. If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate. + /// The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. 
If no value is specified, the default is a private namespace for each container. If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate. public let pidMode: PidMode? /// An array of placement constraint objects to use for the task. You can specify a /// maximum of 10 constraints for each task. This limit includes constraints in the task @@ -5624,11 +5616,11 @@ extension ECS { public struct ResourceRequirement: AWSEncodableShape & AWSDecodableShape { /// The type of resource to assign to a container. public let type: ResourceType - /// The value for the specified resource type. When the type is GPU, the value is the number of physical GPUs the - /// Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for - /// all containers in a task can't exceed the number of available GPUs on the container - /// instance that the task is launched on. When the type is InferenceAccelerator, the value matches - /// the deviceName for an InferenceAccelerator specified in a task definition. + /// The value for the specified resource type. When the type is GPU, the value is the number of physical + /// GPUs the Amazon ECS container agent reserves for the container. The number + /// of GPUs that's reserved for all containers in a task can't exceed the number of + /// available GPUs on the container instance that the task is launched on. When the type is InferenceAccelerator, the value matches the + /// deviceName for an InferenceAccelerator specified in a task definition. public let value: String public init(type: ResourceType, value: String) { @@ -5719,9 +5711,9 @@ extension ECS { /// An optional tag specified when a task is started. 
For example, if you automatically /// trigger a task to run a batch process job, you could apply a unique identifier for that /// job to your task with the startedBy parameter. You can then identify which - /// tasks belong to that job by filtering the results of a ListTasks call - /// with the startedBy value. Up to 128 letters (uppercase and lowercase), - /// numbers, hyphens (-), and underscores (_) are allowed. If a task is started by an Amazon ECS service, then the startedBy parameter + /// tasks belong to that job by filtering the results of a ListTasks call with + /// the startedBy value. Up to 128 letters (uppercase and lowercase), numbers, + /// hyphens (-), forward slash (/), and underscores (_) are allowed. If a task is started by an Amazon ECS service, then the startedBy parameter /// contains the deployment ID of the service that starts it. public let startedBy: String? /// The metadata that you apply to the task to help you categorize and organize them. Each @@ -5733,8 +5725,8 @@ extension ECS { /// Resource of the principal's permissions policy. When you specify a task definition, you must either specify a specific revision, or /// all revisions in the ARN. To specify a specific revision, include the revision number in the ARN. For example, /// to specify revision 2, use - /// arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2. To specify all revisions, use the wildcard (*) in the ARN. For example, to specify all - /// revisions, use + /// arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2. To specify all revisions, use the wildcard (*) in the ARN. For example, to specify + /// all revisions, use /// arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*. For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service Developer Guide. public let taskDefinition: String /// The details of the volume that was configuredAtLaunch. 
You can configure @@ -6503,9 +6495,9 @@ extension ECS { /// An optional tag specified when a task is started. For example, if you automatically /// trigger a task to run a batch process job, you could apply a unique identifier for that /// job to your task with the startedBy parameter. You can then identify which - /// tasks belong to that job by filtering the results of a ListTasks call - /// with the startedBy value. Up to 36 letters (uppercase and lowercase), - /// numbers, hyphens (-), and underscores (_) are allowed. If a task is started by an Amazon ECS service, the startedBy parameter + /// tasks belong to that job by filtering the results of a ListTasks call with + /// the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, + /// hyphens (-), forward slash (/), and underscores (_) are allowed. If a task is started by an Amazon ECS service, the startedBy parameter /// contains the deployment ID of the service that starts it. public let startedBy: String? /// The metadata that you apply to the task to help you categorize and organize them. Each @@ -7067,14 +7059,16 @@ extension ECS { /// The number of cpu units used by the task. If you use the EC2 launch type, /// this field is optional. Any value can be used. If you use the Fargate launch type, this /// field is required. You must use one of the following values. The value that you choose - /// determines your range of valid values for the memory parameter. The CPU units cannot be less than 1 vCPU when you use Windows containers on + /// determines your range of valid values for the memory parameter. If you use the EC2 launch type, this field is optional. Supported values + /// are between 128 CPU units (0.125 vCPUs) and 10240 + /// CPU units (10 vCPUs). The CPU units cannot be less than 1 vCPU when you use Windows containers on /// Fargate. 
256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments This option requires Linux platform 1.4.0 or later. 16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments This option requires Linux platform 1.4.0 or later. public let cpu: String? /// The Unix timestamp for the time when the task definition was deregistered. public let deregisteredAt: Date? /// The ephemeral storage settings to use for tasks run with the task definition. public let ephemeralStorage: EphemeralStorage? - /// The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide. + /// The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. For information about the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. public let executionRoleArn: String? /// The name of a family that this task definition is registered to. Up to 255 characters /// are allowed. 
Letters (both uppercase and lowercase letters), numbers, hyphens (-), and @@ -7084,7 +7078,7 @@ extension ECS { public let family: String? /// The Elastic Inference accelerator that's associated with the task. public let inferenceAccelerators: [InferenceAccelerator]? - /// The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference. If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see Docker security. If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace related systemControls are not supported. For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task. This parameter is not supported for Windows containers or tasks run on Fargate. + /// The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. 
If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace related systemControls are not supported. For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task. This parameter is not supported for Windows containers or tasks run on Fargate. public let ipcMode: IpcMode? /// The amount (in MiB) of memory used by the task. If your tasks runs on Amazon EC2 instances, you must specify either a task-level memory /// value or a container-level memory value. This field is optional and any value can be @@ -7094,9 +7088,9 @@ extension ECS { /// following values. The value you choose determines your range of valid values for the /// cpu parameter. 
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU) This option requires Linux platform 1.4.0 or later. Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU) This option requires Linux platform 1.4.0 or later. public let memory: String? - /// The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge. For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode. With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings. When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. 
If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide. If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used. For more information, see Network settings in the Docker run reference. + /// The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge. For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode. With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings. When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. 
For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide. If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used. public let networkMode: NetworkMode? - /// The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference. If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate. + /// The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. 
If no value is specified, the default is a private namespace for each container. If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate. public let pidMode: PidMode? /// An array of placement constraint objects to use for tasks. This parameter isn't supported for tasks run on Fargate. public let placementConstraints: [TaskDefinitionPlacementConstraint]? @@ -7137,11 +7131,9 @@ extension ECS { /// The full Amazon Resource Name (ARN) of the task definition. public let taskDefinitionArn: String? /// The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the - /// task permission to call Amazon Web Services APIs on your behalf. For more information, see Amazon ECS - /// Task Role in the Amazon Elastic Container Service Developer Guide. IAM roles for tasks on Windows require that the -EnableTaskIAMRole - /// option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some - /// configuration code to use the feature. For more information, see Windows IAM roles - /// for tasks in the Amazon Elastic Container Service Developer Guide. + /// task permission to call Amazon Web Services APIs on your behalf. For information about the required + /// IAM roles for Amazon ECS, see IAM + /// roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. public let taskRoleArn: String? /// The list of data volume definitions for the task. For more information, see Using data volumes in tasks in the Amazon Elastic Container Service Developer Guide. The host and sourcePath parameters aren't supported for /// tasks run on Fargate. 
@@ -7222,11 +7214,12 @@ extension ECS { } public struct TaskEphemeralStorage: AWSDecodableShape { - /// Specify an Key Management Service key ID to encrypt the ephemeral storage for the task. + /// Specify a Key Management Service key ID to encrypt the ephemeral storage for the + /// task. public let kmsKeyId: String? /// The total amount, in GiB, of the ephemeral storage to set for the task. The minimum - /// supported value is 20 GiB and the maximum supported value is
 200 - /// GiB. + /// supported value is 20 GiB and the maximum supported value is + /// 200 GiB. public let sizeInGiB: Int? public init(kmsKeyId: String? = nil, sizeInGiB: Int? = nil) { @@ -8343,7 +8336,11 @@ public struct ECSErrorType: AWSErrorType { public static var blockedException: Self { .init(.blockedException) } /// These errors are usually caused by a client action. This client action might be using /// an action or resource on behalf of a user that doesn't have permissions to use the - /// action or resource. Or, it might be specifying an identifier that isn't valid. + /// action or resource. Or, it might be specifying an identifier that isn't valid. The following list includes additional causes for the error: The RunTask could not be processed because you use managed + /// scaling and there is a capacity error because the quota of tasks in the + /// PROVISIONING state per cluster has been reached. For information + /// about the service quotas, see Amazon ECS + /// service quotas. public static var clientException: Self { .init(.clientException) } /// You can't delete a cluster that has registered container instances. First, deregister /// the container instances before you can delete the cluster. For more information, see @@ -8402,8 +8399,8 @@ public struct ECSErrorType: AWSErrorType { /// Exec in the Amazon Elastic Container Service Developer Guide. public static var targetNotConnectedException: Self { .init(.targetNotConnectedException) } /// The specified target wasn't found. You can view your available container instances - /// with ListContainerInstances. Amazon ECS container instances are - /// cluster-specific and Region-specific. + /// with ListContainerInstances. Amazon ECS container instances are cluster-specific and + /// Region-specific. public static var targetNotFoundException: Self { .init(.targetNotFoundException) } /// The specified task set wasn't found. You can view your available task sets with DescribeTaskSets. 
Task sets are specific to each cluster, service and /// Region. diff --git a/Sources/Soto/Services/EKS/EKS_shapes.swift b/Sources/Soto/Services/EKS/EKS_shapes.swift index 5d74885a83..6742a5641a 100644 --- a/Sources/Soto/Services/EKS/EKS_shapes.swift +++ b/Sources/Soto/Services/EKS/EKS_shapes.swift @@ -28,6 +28,8 @@ extension EKS { public enum AMITypes: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case al2023Arm64Standard = "AL2023_ARM_64_STANDARD" + case al2023X8664NEURON = "AL2023_x86_64_NEURON" + case al2023X8664NVIDIA = "AL2023_x86_64_NVIDIA" case al2023X8664STANDARD = "AL2023_x86_64_STANDARD" case al2Arm64 = "AL2_ARM_64" case al2X8664 = "AL2_x86_64" @@ -286,6 +288,12 @@ extension EKS { public var description: String { return self.rawValue } } + public enum SupportType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case extended = "EXTENDED" + case standard = "STANDARD" + public var description: String { return self.rawValue } + } + public enum TaintEffect: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case noExecute = "NO_EXECUTE" case noSchedule = "NO_SCHEDULE" @@ -321,6 +329,7 @@ extension EKS { case subnets = "Subnets" case taintsToAdd = "TaintsToAdd" case taintsToRemove = "TaintsToRemove" + case upgradePolicy = "UpgradePolicy" case version = "Version" public var description: String { return self.rawValue } } @@ -342,6 +351,7 @@ extension EKS { case disassociateIdentityProviderConfig = "DisassociateIdentityProviderConfig" case endpointAccessUpdate = "EndpointAccessUpdate" case loggingUpdate = "LoggingUpdate" + case upgradePolicyUpdate = "UpgradePolicyUpdate" case versionUpdate = "VersionUpdate" case vpcConfigUpdate = "VpcConfigUpdate" public var description: String { return self.rawValue } @@ -917,10 +927,12 @@ extension EKS { public let status: ClusterStatus? /// Metadata that assists with categorization and organization. 
Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. public let tags: [String: String]? + /// This value indicates if extended support is enabled or disabled for the cluster. Learn more about EKS Extended Support in the EKS User Guide. + public let upgradePolicy: UpgradePolicyResponse? /// The Kubernetes server version for the cluster. public let version: String? - public init(accessConfig: AccessConfigResponse? = nil, arn: String? = nil, certificateAuthority: Certificate? = nil, clientRequestToken: String? = nil, connectorConfig: ConnectorConfigResponse? = nil, createdAt: Date? = nil, encryptionConfig: [EncryptionConfig]? = nil, endpoint: String? = nil, health: ClusterHealth? = nil, id: String? = nil, identity: Identity? = nil, kubernetesNetworkConfig: KubernetesNetworkConfigResponse? = nil, logging: Logging? = nil, name: String? = nil, outpostConfig: OutpostConfigResponse? = nil, platformVersion: String? = nil, resourcesVpcConfig: VpcConfigResponse? = nil, roleArn: String? = nil, status: ClusterStatus? = nil, tags: [String: String]? = nil, version: String? = nil) { + public init(accessConfig: AccessConfigResponse? = nil, arn: String? = nil, certificateAuthority: Certificate? = nil, clientRequestToken: String? = nil, connectorConfig: ConnectorConfigResponse? = nil, createdAt: Date? = nil, encryptionConfig: [EncryptionConfig]? = nil, endpoint: String? = nil, health: ClusterHealth? = nil, id: String? = nil, identity: Identity? = nil, kubernetesNetworkConfig: KubernetesNetworkConfigResponse? = nil, logging: Logging? = nil, name: String? = nil, outpostConfig: OutpostConfigResponse? = nil, platformVersion: String? = nil, resourcesVpcConfig: VpcConfigResponse? = nil, roleArn: String? = nil, status: ClusterStatus? = nil, tags: [String: String]? = nil, upgradePolicy: UpgradePolicyResponse? = nil, version: String? 
= nil) { self.accessConfig = accessConfig self.arn = arn self.certificateAuthority = certificateAuthority @@ -941,6 +953,7 @@ extension EKS { self.roleArn = roleArn self.status = status self.tags = tags + self.upgradePolicy = upgradePolicy self.version = version } @@ -965,6 +978,7 @@ extension EKS { case roleArn = "roleArn" case status = "status" case tags = "tags" + case upgradePolicy = "upgradePolicy" case version = "version" } } @@ -1294,10 +1308,12 @@ extension EKS { public let roleArn: String /// Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. public let tags: [String: String]? + /// New clusters, by default, have extended support enabled. You can disable extended support when creating a cluster by setting this value to STANDARD. + public let upgradePolicy: UpgradePolicyRequest? /// The desired Kubernetes version for your cluster. If you don't specify a value here, the default version available in Amazon EKS is used. The default version might not be the latest version available. public let version: String? - public init(accessConfig: CreateAccessConfigRequest? = nil, bootstrapSelfManagedAddons: Bool? = nil, clientRequestToken: String? = CreateClusterRequest.idempotencyToken(), encryptionConfig: [EncryptionConfig]? = nil, kubernetesNetworkConfig: KubernetesNetworkConfigRequest? = nil, logging: Logging? = nil, name: String, outpostConfig: OutpostConfigRequest? = nil, resourcesVpcConfig: VpcConfigRequest, roleArn: String, tags: [String: String]? = nil, version: String? = nil) { + public init(accessConfig: CreateAccessConfigRequest? = nil, bootstrapSelfManagedAddons: Bool? = nil, clientRequestToken: String? = CreateClusterRequest.idempotencyToken(), encryptionConfig: [EncryptionConfig]? = nil, kubernetesNetworkConfig: KubernetesNetworkConfigRequest? = nil, logging: Logging? 
= nil, name: String, outpostConfig: OutpostConfigRequest? = nil, resourcesVpcConfig: VpcConfigRequest, roleArn: String, tags: [String: String]? = nil, upgradePolicy: UpgradePolicyRequest? = nil, version: String? = nil) { self.accessConfig = accessConfig self.bootstrapSelfManagedAddons = bootstrapSelfManagedAddons self.clientRequestToken = clientRequestToken @@ -1309,6 +1325,7 @@ extension EKS { self.resourcesVpcConfig = resourcesVpcConfig self.roleArn = roleArn self.tags = tags + self.upgradePolicy = upgradePolicy self.version = version } @@ -1338,6 +1355,7 @@ extension EKS { case resourcesVpcConfig = "resourcesVpcConfig" case roleArn = "roleArn" case tags = "tags" + case upgradePolicy = "upgradePolicy" case version = "version" } } @@ -4480,13 +4498,16 @@ extension EKS { /// The name of the Amazon EKS cluster to update. public let name: String public let resourcesVpcConfig: VpcConfigRequest? + /// You can enable or disable extended support for clusters currently on standard support. You cannot disable extended support once it starts. You must enable extended support before your cluster exits standard support. + public let upgradePolicy: UpgradePolicyRequest? - public init(accessConfig: UpdateAccessConfigRequest? = nil, clientRequestToken: String? = UpdateClusterConfigRequest.idempotencyToken(), logging: Logging? = nil, name: String, resourcesVpcConfig: VpcConfigRequest? = nil) { + public init(accessConfig: UpdateAccessConfigRequest? = nil, clientRequestToken: String? = UpdateClusterConfigRequest.idempotencyToken(), logging: Logging? = nil, name: String, resourcesVpcConfig: VpcConfigRequest? = nil, upgradePolicy: UpgradePolicyRequest? 
= nil) { self.accessConfig = accessConfig self.clientRequestToken = clientRequestToken self.logging = logging self.name = name self.resourcesVpcConfig = resourcesVpcConfig + self.upgradePolicy = upgradePolicy } public func encode(to encoder: Encoder) throws { @@ -4497,6 +4518,7 @@ extension EKS { try container.encodeIfPresent(self.logging, forKey: .logging) request.encodePath(self.name, key: "name") try container.encodeIfPresent(self.resourcesVpcConfig, forKey: .resourcesVpcConfig) + try container.encodeIfPresent(self.upgradePolicy, forKey: .upgradePolicy) } private enum CodingKeys: String, CodingKey { @@ -4504,6 +4526,7 @@ extension EKS { case clientRequestToken = "clientRequestToken" case logging = "logging" case resourcesVpcConfig = "resourcesVpcConfig" + case upgradePolicy = "upgradePolicy" } } @@ -4844,6 +4867,32 @@ extension EKS { } } + public struct UpgradePolicyRequest: AWSEncodableShape { + /// If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support. Learn more about EKS Extended Support in the EKS User Guide. + public let supportType: SupportType? + + public init(supportType: SupportType? = nil) { + self.supportType = supportType + } + + private enum CodingKeys: String, CodingKey { + case supportType = "supportType" + } + } + + public struct UpgradePolicyResponse: AWSDecodableShape { + /// If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support. Learn more about EKS Extended Support in the EKS User Guide. + public let supportType: SupportType? + + public init(supportType: SupportType? 
= nil) { + self.supportType = supportType + } + + private enum CodingKeys: String, CodingKey { + case supportType = "supportType" + } + } + public struct VpcConfigRequest: AWSEncodableShape { /// Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. If you disable private access and you have nodes or Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide . public let endpointPrivateAccess: Bool? diff --git a/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift b/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift index 06bfce2235..2ae38fb306 100644 --- a/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift +++ b/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift @@ -151,7 +151,7 @@ public struct ElastiCache: AWSService { ) } - /// Creates a copy of an existing serverless cache’s snapshot. Available for Redis only. + /// Creates a copy of an existing serverless cache’s snapshot. Available for Redis OSS and Serverless Memcached only. @Sendable public func copyServerlessCacheSnapshot(_ input: CopyServerlessCacheSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CopyServerlessCacheSnapshotResponse { return try await self.client.execute( @@ -164,7 +164,7 @@ public struct ElastiCache: AWSService { ) } - /// Makes a copy of an existing snapshot. This operation is valid for Redis only. Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. 
To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control. You could receive the following error messages. Error Messages Error Message: The S3 bucket %s is outside of the region. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s is not owned by the authenticated user. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The authenticated user does not have sufficient permissions to perform the desired activity. Solution: Contact your system administrator to get the needed permissions. Error Message: The S3 bucket %s already contains an object with key %s. Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName. Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket. Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket. Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. 
Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket. Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. + /// Makes a copy of an existing snapshot. This operation is valid for Redis OSS only. Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control. You could receive the following error messages. Error Messages Error Message: The S3 bucket %s is outside of the region. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s is not owned by the authenticated user. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The authenticated user does not have sufficient permissions to perform the desired activity. Solution: Contact your system administrator to get the needed permissions. Error Message: The S3 bucket %s already contains an object with key %s. Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName. 
Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket. Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket. Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket. Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. @Sendable public func copySnapshot(_ input: CopySnapshotMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> CopySnapshotResult { return try await self.client.execute( @@ -177,7 +177,7 @@ public struct ElastiCache: AWSService { ) } - /// Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached or Redis. This operation is not supported for Redis (cluster mode enabled) clusters. + /// Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached or Redis OSS. This operation is not supported for Redis OSS (cluster mode enabled) clusters. @Sendable public func createCacheCluster(_ input: CreateCacheClusterMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCacheClusterResult { return try await self.client.execute( @@ -229,7 +229,7 @@ public struct ElastiCache: AWSService { ) } - /// Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis, you can create cross-region read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across regions. 
For more information, see Replication Across Regions Using Global Datastore. The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster. + /// Global Datastore for Redis OSS offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis OSS, you can create cross-region read replica clusters for ElastiCache (Redis OSS) to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore. The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster. @Sendable public func createGlobalReplicationGroup(_ input: CreateGlobalReplicationGroupMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateGlobalReplicationGroupResult { return try await self.client.execute( @@ -242,7 +242,7 @@ public struct ElastiCache: AWSService { ) } - /// Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group. This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore. A Redis (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas. A Redis cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number or replicas allowed. 
The node or shard limit can be increased to a maximum of 500 per cluster if the Redis engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster. To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type. When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' scaling. For more information, see Scaling ElastiCache for Redis Clusters in the ElastiCache User Guide. This operation is valid for Redis only. + /// Creates a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group. This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore. A Redis OSS (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas. A Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number or replicas allowed. 
The node or shard limit can be increased to a maximum of 500 per cluster if the Redis OSS engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster. To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type. When a Redis OSS (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can use ElastiCache (Redis OSS) scaling. For more information, see Scaling ElastiCache (Redis OSS) Clusters in the ElastiCache User Guide. This operation is valid for Redis OSS only. @Sendable public func createReplicationGroup(_ input: CreateReplicationGroupMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateReplicationGroupResult { return try await self.client.execute( @@ -268,7 +268,7 @@ public struct ElastiCache: AWSService { ) } - /// This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis only. + /// This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis OSS and Serverless Memcached only. 
@Sendable public func createServerlessCacheSnapshot(_ input: CreateServerlessCacheSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateServerlessCacheSnapshotResponse { return try await self.client.execute( @@ -281,7 +281,7 @@ public struct ElastiCache: AWSService { ) } - /// Creates a copy of an entire cluster or replication group at a specific moment in time. This operation is valid for Redis only. + /// Creates a copy of an entire cluster or replication group at a specific moment in time. This operation is valid for Redis OSS only. @Sendable public func createSnapshot(_ input: CreateSnapshotMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSnapshotResult { return try await self.client.execute( @@ -294,7 +294,7 @@ public struct ElastiCache: AWSService { ) } - /// For Redis engine version 6.0 onwards: Creates a Redis user. For more information, see Using Role Based Access Control (RBAC). + /// For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user. For more information, see Using Role Based Access Control (RBAC). @Sendable public func createUser(_ input: CreateUserMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> User { return try await self.client.execute( @@ -307,7 +307,7 @@ public struct ElastiCache: AWSService { ) } - /// For Redis engine version 6.0 onwards: Creates a Redis user group. For more information, see Using Role Based Access Control (RBAC) + /// For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user group. 
For more information, see Using Role Based Access Control (RBAC) @Sendable public func createUserGroup(_ input: CreateUserGroupMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> UserGroup { return try await self.client.execute( @@ -333,7 +333,7 @@ public struct ElastiCache: AWSService { ) } - /// Dynamically decreases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time. + /// Dynamically decreases the number of replicas in a Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time. @Sendable public func decreaseReplicaCount(_ input: DecreaseReplicaCountMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> DecreaseReplicaCountResult { return try await self.client.execute( @@ -346,7 +346,7 @@ public struct ElastiCache: AWSService { ) } - /// Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation. This operation is not valid for: Redis (cluster mode enabled) clusters Redis (cluster mode disabled) clusters A cluster that is the last read replica of a replication group A cluster that is the primary node of a replication group A node group (shard) that has Multi-AZ mode enabled A cluster from a Redis (cluster mode enabled) replication group A cluster that is not in the available state + /// Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. 
When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation. This operation is not valid for: Redis OSS (cluster mode enabled) clusters Redis OSS (cluster mode disabled) clusters A cluster that is the last read replica of a replication group A cluster that is the primary node of a replication group A node group (shard) that has Multi-AZ mode enabled A cluster from a Redis OSS (cluster mode enabled) replication group A cluster that is not in the available state @Sendable public func deleteCacheCluster(_ input: DeleteCacheClusterMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteCacheClusterResult { return try await self.client.execute( @@ -411,7 +411,7 @@ public struct ElastiCache: AWSService { ) } - /// Deletes an existing replication group. By default, this operation deletes the entire replication group, including the primary/primaries and all of the read replicas. If the replication group has only one primary, you can optionally delete only the read replicas, while retaining the primary by setting RetainPrimaryCluster=true. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this operation. This operation is valid for Redis only. + /// Deletes an existing replication group. By default, this operation deletes the entire replication group, including the primary/primaries and all of the read replicas. If the replication group has only one primary, you can optionally delete only the read replicas, while retaining the primary by setting RetainPrimaryCluster=true. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this operation. CreateSnapshot permission is required to create a final snapshot. 
Without this permission, the API call will fail with an Access Denied exception. This operation is valid for Redis OSS only. @Sendable public func deleteReplicationGroup(_ input: DeleteReplicationGroupMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteReplicationGroupResult { return try await self.client.execute( @@ -424,7 +424,7 @@ public struct ElastiCache: AWSService { ) } - /// Deletes a specified existing serverless cache. + /// Deletes a specified existing serverless cache. CreateServerlessCacheSnapshot permission is required to create a final snapshot. Without this permission, the API call will fail with an Access Denied exception. @Sendable public func deleteServerlessCache(_ input: DeleteServerlessCacheRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteServerlessCacheResponse { return try await self.client.execute( @@ -437,7 +437,7 @@ public struct ElastiCache: AWSService { ) } - /// Deletes an existing serverless cache snapshot. Available for Redis only. + /// Deletes an existing serverless cache snapshot. Available for Redis OSS and Serverless Memcached only. @Sendable public func deleteServerlessCacheSnapshot(_ input: DeleteServerlessCacheSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteServerlessCacheSnapshotResponse { return try await self.client.execute( @@ -450,7 +450,7 @@ public struct ElastiCache: AWSService { ) } - /// Deletes an existing snapshot. When you receive a successful response from this operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this operation. This operation is valid for Redis only. + /// Deletes an existing snapshot. When you receive a successful response from this operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this operation. This operation is valid for Redis OSS only. 
@Sendable public func deleteSnapshot(_ input: DeleteSnapshotMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteSnapshotResult { return try await self.client.execute( @@ -463,7 +463,7 @@ public struct ElastiCache: AWSService { ) } - /// For Redis engine version 6.0 onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC). + /// For Redis OSS engine version 6.0 onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC). @Sendable public func deleteUser(_ input: DeleteUserMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> User { return try await self.client.execute( @@ -476,7 +476,7 @@ public struct ElastiCache: AWSService { ) } - /// For Redis engine version 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC). + /// For Redis OSS engine version 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC). @Sendable public func deleteUserGroup(_ input: DeleteUserGroupMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> UserGroup { return try await self.client.execute( @@ -606,7 +606,7 @@ public struct ElastiCache: AWSService { ) } - /// Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups. This operation is valid for Redis only. + /// Returns information about a particular replication group. 
If no identifier is specified, DescribeReplicationGroups returns information about all replication groups. This operation is valid for Redis OSS only. @Sendable public func describeReplicationGroups(_ input: DescribeReplicationGroupsMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> ReplicationGroupMessage { return try await self.client.execute( @@ -645,7 +645,7 @@ public struct ElastiCache: AWSService { ) } - /// Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Redis only. + /// Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Redis OSS and Serverless Memcached only. @Sendable public func describeServerlessCacheSnapshots(_ input: DescribeServerlessCacheSnapshotsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeServerlessCacheSnapshotsResponse { return try await self.client.execute( @@ -684,7 +684,7 @@ public struct ElastiCache: AWSService { ) } - /// Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster. This operation is valid for Redis only. + /// Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster. This operation is valid for Redis OSS only. 
@Sendable public func describeSnapshots(_ input: DescribeSnapshotsMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeSnapshotsListMessage { return try await self.client.execute( @@ -749,7 +749,7 @@ public struct ElastiCache: AWSService { ) } - /// Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis only. + /// Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis OSS only. @Sendable public func exportServerlessCacheSnapshot(_ input: ExportServerlessCacheSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ExportServerlessCacheSnapshotResponse { return try await self.client.execute( @@ -788,7 +788,7 @@ public struct ElastiCache: AWSService { ) } - /// Dynamically increases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time. + /// Dynamically increases the number of replicas in a Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time. @Sendable public func increaseReplicaCount(_ input: IncreaseReplicaCountMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> IncreaseReplicaCountResult { return try await self.client.execute( @@ -801,7 +801,7 @@ public struct ElastiCache: AWSService { ) } - /// Lists all available node types that you can scale your Redis cluster's or replication group's current node type. When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation. 
+ /// Lists all available node types that you can scale your Redis OSS cluster's or replication group's current node type. When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation. @Sendable public func listAllowedNodeTypeModifications(_ input: ListAllowedNodeTypeModificationsMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> AllowedNodeTypeModificationsMessage { return try await self.client.execute( @@ -879,7 +879,7 @@ public struct ElastiCache: AWSService { ) } - /// Modifies the settings for a replication group. This is limited to Redis 7 and newer. Scaling for Amazon ElastiCache for Redis (cluster mode enabled) in the ElastiCache User Guide ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This operation is valid for Redis only. + /// Modifies the settings for a replication group. This is limited to Redis OSS 7 and newer. Scaling for Amazon ElastiCache (Redis OSS) (cluster mode enabled) in the ElastiCache User Guide ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This operation is valid for Redis OSS only. @Sendable public func modifyReplicationGroup(_ input: ModifyReplicationGroupMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> ModifyReplicationGroupResult { return try await self.client.execute( @@ -944,7 +944,7 @@ public struct ElastiCache: AWSService { ) } - /// Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis or Managing Costs with Reserved Nodes for Memcached. + /// Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. 
For more information, see Managing Costs with Reserved Nodes for Redis OSS or Managing Costs with Reserved Nodes for Memcached. @Sendable public func purchaseReservedCacheNodesOffering(_ input: PurchaseReservedCacheNodesOfferingMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> PurchaseReservedCacheNodesOfferingResult { return try await self.client.execute( @@ -970,7 +970,7 @@ public struct ElastiCache: AWSService { ) } - /// Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING. The reboot causes the contents of the cache (for each cache node being rebooted) to be lost. When the reboot is complete, a cluster event is created. Rebooting a cluster is currently supported on Memcached and Redis (cluster mode disabled) clusters. Rebooting is not supported on Redis (cluster mode enabled) clusters. If you make changes to parameters that require a Redis (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process. + /// Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING. The reboot causes the contents of the cache (for each cache node being rebooted) to be lost. When the reboot is complete, a cluster event is created. Rebooting a cluster is currently supported on Memcached and Redis OSS (cluster mode disabled) clusters. Rebooting is not supported on Redis OSS (cluster mode enabled) clusters. 
If you make changes to parameters that require a Redis OSS (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process. @Sendable public func rebootCacheCluster(_ input: RebootCacheClusterMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> RebootCacheClusterResult { return try await self.client.execute( @@ -1035,7 +1035,7 @@ public struct ElastiCache: AWSService { ) } - /// Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. 
Look for the following automatic failover related events, listed here in order of occurrance: Replication group message: Test Failover API called for node group Cache cluster message: Failover from primary node to replica node completed Replication group message: Failover from primary node to replica node completed Cache cluster message: Recovering cache nodes Cache cluster message: Finished recovery for cache nodes For more information see: Viewing ElastiCache Events in the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the ElastiCache User Guide. + /// Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. 
Look for the following automatic failover related events, listed here in order of occurrance: Replication group message: Test Failover API called for node group Cache cluster message: Failover from primary node to replica node completed Replication group message: Failover from primary node to replica node completed Cache cluster message: Recovering cache nodes Cache cluster message: Finished recovery for cache nodes For more information see: Viewing ElastiCache Events in the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the ElastiCache User Guide. @Sendable public func testFailover(_ input: TestFailoverMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> TestFailoverResult { return try await self.client.execute( @@ -1246,7 +1246,7 @@ extension ElastiCache { ) } - /// Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups. This operation is valid for Redis only. + /// Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups. This operation is valid for Redis OSS only. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -1303,7 +1303,7 @@ extension ElastiCache { ) } - /// Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Redis only. + /// Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Redis OSS and Serverless Memcached only. 
/// Return PaginatorSequence for operation. /// /// - Parameters: @@ -1360,7 +1360,7 @@ extension ElastiCache { ) } - /// Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster. This operation is valid for Redis only. + /// Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster. This operation is valid for Redis OSS only. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift b/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift index d053c44c1f..9c69f70a9c 100644 --- a/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift +++ b/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift @@ -247,10 +247,10 @@ extension ElastiCache { } public struct AllowedNodeTypeModificationsMessage: AWSDecodableShape { - /// A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. When scaling down a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter. + /// A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. When scaling down a Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter. @OptionalCustomCoding> public var scaleDownModifications: [String]? - /// A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. 
When scaling up a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter. + /// A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. When scaling up a Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter. @OptionalCustomCoding> public var scaleUpModifications: [String]? @@ -413,13 +413,13 @@ extension ElastiCache { /// The ARN (Amazon Resource Name) of the cache cluster. public let arn: String? - /// A flag that enables encryption at-rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false + /// A flag that enables encryption at-rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false public let atRestEncryptionEnabled: Bool? - /// A flag that enables using an AuthToken (password) when issuing Redis commands. Default: false + /// A flag that enables using an AuthToken (password) when issuing Redis OSS commands. Default: false public let authTokenEnabled: Bool? /// The date the auth token was last modified public let authTokenLastModifiedDate: Date? - ///  If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. 
This parameter is disabled for previous versions. + ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// The date and time when the cluster was created. public let cacheClusterCreateTime: Date? @@ -430,7 +430,7 @@ extension ElastiCache { /// A list of cache nodes that are members of the cluster. @OptionalCustomCoding> public var cacheNodes: [CacheNode]? - /// The name of the compute and memory capacity node type for the cluster. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// The name of the compute and memory capacity node type for the cluster. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// /// cache.m6g.large, /// cache.m6g.xlarge, @@ -438,7 +438,7 @@ extension ElastiCache { /// cache.m6g.4xlarge, /// cache.m6g.8xlarge, /// cache.m6g.12xlarge, - /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): + /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): /// cache.t4g.micro, /// cache.t4g.small, /// cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
T1 node types: cache.t1.micro M1 node types: cache.m1.small, @@ -451,14 +451,14 @@ extension ElastiCache { /// cache.r7g.4xlarge, /// cache.r7g.8xlarge, /// cache.r7g.12xlarge, - /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// cache.r6g.large, /// cache.r6g.xlarge, /// cache.r6g.2xlarge, /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. 
Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// Status of the cache parameter group. public let cacheParameterGroup: CacheParameterGroupStatus? @@ -475,16 +475,16 @@ extension ElastiCache { public let engine: String? /// The version of the cache engine that is used in this cluster. public let engineVersion: String? - /// The network type associated with the cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// The network type associated with the cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// Returns the destination, format and type of the logs. @OptionalCustomCoding> public var logDeliveryConfigurations: [LogDeliveryConfiguration]? - /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. 
public let networkType: NetworkType? /// Describes a notification topic and its status. Notification topics are used for publishing ElastiCache events to subscribers using Amazon Simple Notification Service (SNS). public let notificationConfiguration: NotificationConfiguration? - /// The number of cache nodes in the cluster. For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. + /// The number of cache nodes in the cluster. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. public let numCacheNodes: Int? public let pendingModifiedValues: PendingModifiedValues? /// The name of the Availability Zone in which the cluster is located or "Multiple" if the cache nodes are located in different Availability Zones. @@ -504,7 +504,7 @@ extension ElastiCache { public let snapshotRetentionLimit: Int? /// The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your cluster. Example: 05:00-09:00 public let snapshotWindow: String? - /// A flag that enables in-transit encryption when set to true. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false + /// A flag that enables in-transit encryption when set to true. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false public let transitEncryptionEnabled: Bool? /// A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. public let transitEncryptionMode: TransitEncryptionMode? @@ -991,7 +991,7 @@ extension ElastiCache { /// A list of subnets associated with the cache subnet group. @OptionalCustomCoding> public var subnets: [Subnet]? - /// Either ipv4 | ipv6 | dual_stack. 
IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. @OptionalCustomCoding> public var supportedNetworkTypes: [NetworkType]? /// The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet group. @@ -1098,11 +1098,11 @@ extension ElastiCache { public struct _PreferredAvailabilityZonesEncoding: ArrayCoderProperties { public static let member = "PreferredAvailabilityZone" } public struct _PreferredOutpostArnsEncoding: ArrayCoderProperties { public static let member = "PreferredOutpostArn" } - /// The number of replicas you want in this node group at the end of this operation. The maximum value for NewReplicaCount is 5. The minimum value depends upon the type of Redis replication group you are working with. The minimum number of replicas in a shard or replication group is: Redis (cluster mode disabled) If Multi-AZ: 1 If Multi-AZ: 0 Redis (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails) + /// The number of replicas you want in this node group at the end of this operation. The maximum value for NewReplicaCount is 5. The minimum value depends upon the type of Redis OSS replication group you are working with. The minimum number of replicas in a shard or replication group is: Redis OSS (cluster mode disabled) If Multi-AZ: 1 If Multi-AZ: 0 Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails) public let newReplicaCount: Int? - /// The 4-digit id for the node group you are configuring. For Redis (cluster mode disabled) replication groups, the node group id is always 0001. 
To find a Redis (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's Id. + /// The 4-digit id for the node group you are configuring. For Redis OSS (cluster mode disabled) replication groups, the node group id is always 0001. To find a Redis OSS (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's Id. public let nodeGroupId: String? - /// A list of PreferredAvailabilityZone strings that specify which availability zones the replication group's nodes are to be in. The nummber of PreferredAvailabilityZone values must equal the value of NewReplicaCount plus 1 to account for the primary node. If this member of ReplicaConfiguration is omitted, ElastiCache for Redis selects the availability zone for each of the replicas. + /// A list of PreferredAvailabilityZone strings that specify which availability zones the replication group's nodes are to be in. The nummber of PreferredAvailabilityZone values must equal the value of NewReplicaCount plus 1 to account for the primary node. If this member of ReplicaConfiguration is omitted, ElastiCache (Redis OSS) selects the availability zone for each of the replicas. @OptionalCustomCoding> public var preferredAvailabilityZones: [String]? /// The outpost ARNs in which the cache cluster is created. @@ -1133,14 +1133,14 @@ extension ElastiCache { public struct CopyServerlessCacheSnapshotRequest: AWSEncodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } - /// The identifier of the KMS key used to encrypt the target snapshot. Available for Redis only. + /// The identifier of the KMS key used to encrypt the target snapshot. Available for Redis OSS and Serverless Memcached only. public let kmsKeyId: String? - /// The identifier of the existing serverless cache’s snapshot to be copied. Available for Redis only. + /// The identifier of the existing serverless cache’s snapshot to be copied. Available for Redis OSS and Serverless Memcached only. 
public let sourceServerlessCacheSnapshotName: String? - /// A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis only. Default: NULL + /// A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only. Default: NULL @OptionalCustomCoding> public var tags: [Tag]? - /// The identifier for the snapshot to be created. Available for Redis only. + /// The identifier for the snapshot to be created. Available for Redis OSS and Serverless Memcached only. public let targetServerlessCacheSnapshotName: String? public init(kmsKeyId: String? = nil, sourceServerlessCacheSnapshotName: String? = nil, tags: [Tag]? = nil, targetServerlessCacheSnapshotName: String? = nil) { @@ -1159,7 +1159,7 @@ extension ElastiCache { } public struct CopyServerlessCacheSnapshotResponse: AWSDecodableShape { - /// The response for the attempt to copy the serverless cache snapshot. Available for Redis only. + /// The response for the attempt to copy the serverless cache snapshot. Available for Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshot: ServerlessCacheSnapshot? public init(serverlessCacheSnapshot: ServerlessCacheSnapshot? = nil) { @@ -1226,13 +1226,13 @@ extension ElastiCache { /// Reserved parameter. The password used to access a password protected server. Password constraints: Must be only printable ASCII characters. Must be at least 16 characters and no more than 128 characters in length. The only permitted printable special characters are !, &, #, $, ^, , and -. Other printable special characters cannot be used in the AUTH token. For more information, see AUTH password at http://redis.io/commands/AUTH. public let authToken: String? - ///  If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 
+ ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. This parameter is only supported for Memcached clusters. If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode. public let azMode: AZMode? /// The node group (shard) identifier. This parameter is stored as a lowercase string. Constraints: A name must contain from 1 to 50 alphanumeric characters or hyphens. The first character must be a letter. A name cannot end with a hyphen or contain two consecutive hyphens. public let cacheClusterId: String? - /// The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// /// cache.m6g.large, /// cache.m6g.xlarge, @@ -1240,7 +1240,7 @@ extension ElastiCache { /// cache.m6g.4xlarge, /// cache.m6g.8xlarge, /// cache.m6g.12xlarge, - /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): + /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): /// cache.t4g.micro, /// cache.t4g.small, /// cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
T1 node types: cache.t1.micro M1 node types: cache.m1.small, @@ -1253,14 +1253,14 @@ extension ElastiCache { /// cache.r7g.4xlarge, /// cache.r7g.8xlarge, /// cache.r7g.12xlarge, - /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// cache.r6g.large, /// cache.r6g.xlarge, /// cache.r6g.2xlarge, /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. 
Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a cluster. public let cacheParameterGroupName: String? @@ -1273,16 +1273,16 @@ extension ElastiCache { public let engine: String? /// The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version. public let engineVersion: String? - /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// The network type you choose when modifying a cluster, either ipv4 | ipv6. 
IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// Specifies the destination, format and type of the logs. @OptionalCustomCoding> public var logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? - /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. public let networkType: NetworkType? /// The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. The Amazon SNS topic owner must be the same as the cluster owner. public let notificationTopicArn: String? - /// The initial number of cache nodes that the cluster has. For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/. + /// The initial number of cache nodes that the cluster has. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/. public let numCacheNodes: Int? /// Specifies whether the nodes in the cluster are created in a single outpost or across multiple outposts. public let outpostMode: OutpostMode? 
@@ -1305,10 +1305,10 @@ extension ElastiCache { /// One or more VPC security groups associated with the cluster. Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC). @OptionalCustomCoding> public var securityGroupIds: [String]? - /// A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas. This parameter is only valid if the Engine parameter is redis. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + /// A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas. This parameter is only valid if the Engine parameter is redis. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb @OptionalCustomCoding> public var snapshotArns: [String]? - /// The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis. + /// The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis. public let snapshotName: String? /// The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted. This parameter is only valid if the Engine parameter is redis. 
Default: 0 (i.e., automatic backups are disabled for this cache cluster). public let snapshotRetentionLimit: Int? @@ -1559,15 +1559,15 @@ extension ElastiCache { public struct _SnapshotArnsEncoding: ArrayCoderProperties { public static let member = "SnapshotArn" } public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } - /// A flag that enables encryption at rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false + /// A flag that enables encryption at rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false public let atRestEncryptionEnabled: Bool? /// Reserved parameter. The password used to access a password protected server. AuthToken can be specified only on replication groups where TransitEncryptionEnabled is true. For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. Password constraints: Must be only printable ASCII characters. Must be at least 16 characters and no more than 128 characters in length. The only permitted printable special characters are !, &, #, $, ^, , and -. Other printable special characters cannot be used in the AUTH token. For more information, see AUTH password at http://redis.io/commands/AUTH. public let authToken: String? 
- /// Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups. Default: false + /// Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled must be enabled for Redis OSS (cluster mode enabled) replication groups. Default: false public let automaticFailoverEnabled: Bool? - ///  If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? - /// The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// /// cache.m6g.large, /// cache.m6g.xlarge, @@ -1575,7 +1575,7 @@ extension ElastiCache { /// cache.m6g.4xlarge, /// cache.m6g.8xlarge, /// cache.m6g.12xlarge, - /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): + /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): /// cache.t4g.micro, /// cache.t4g.small, /// cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
T1 node types: cache.t1.micro M1 node types: cache.m1.small, @@ -1588,23 +1588,23 @@ extension ElastiCache { /// cache.r7g.4xlarge, /// cache.r7g.8xlarge, /// cache.r7g.12xlarge, - /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// cache.r6g.large, /// cache.r6g.xlarge, /// cache.r6g.2xlarge, /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. 
Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? - /// The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. If you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name. To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. + /// The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. If you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name. To create a Redis OSS (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. To create a Redis OSS (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. public let cacheParameterGroupName: String? /// A list of cache security group names to associate with this replication group. 
@OptionalCustomCoding> public var cacheSecurityGroupNames: [String]? /// The name of the cache subnet group to be used for the replication group. If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups. public let cacheSubnetGroupName: String? - /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. + /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. public let clusterMode: ClusterMode? /// Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering. public let dataTieringEnabled: Bool? @@ -1614,7 +1614,7 @@ extension ElastiCache { public let engineVersion: String? /// The name of the Global datastore public let globalReplicationGroupId: String? - /// The network type you choose when creating a replication group, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// The network type you choose when creating a replication group, either ipv4 | ipv6. 
IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// The ID of the KMS key used to encrypt the disk in the cluster. public let kmsKeyId: String? @@ -1623,16 +1623,16 @@ extension ElastiCache { public var logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? /// A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ. public let multiAZEnabled: Bool? - /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. public let networkType: NetworkType? - /// A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots. If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group. + /// A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots. 
If you're creating a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Redis OSS (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group. @OptionalCustomCoding> public var nodeGroupConfiguration: [NodeGroupConfiguration]? /// The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. The Amazon SNS topic owner must be the same as the cluster owner. public let notificationTopicArn: String? /// The number of clusters this replication group initially has. This parameter is not used if there is more than one node group (shard). You should use ReplicasPerNodeGroup instead. If AutomaticFailoverEnabled is true, the value of this parameter must be at least 2. If AutomaticFailoverEnabled is false you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6. The maximum permitted value for NumCacheClusters is 6 (1 primary plus 5 replicas). public let numCacheClusters: Int? - /// An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1. Default: 1 + /// An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1. Default: 1 public let numNodeGroups: Int? /// The port number on which each member of the replication group accepts connections. public let port: Int? 
@@ -1652,9 +1652,9 @@ extension ElastiCache { /// One or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC). @OptionalCustomCoding> public var securityGroupIds: [String]? - /// The name of the snapshot used to create a replication group. Available for Redis only. + /// The name of the snapshot used to create a replication group. Available for Redis OSS only. public let serverlessCacheSnapshotName: String? - /// A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + /// A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb @OptionalCustomCoding> public var snapshotArns: [String]? /// The name of a snapshot from which to restore data into the new replication group. The snapshot status changes to restoring while the new replication group is being created. @@ -1666,9 +1666,9 @@ extension ElastiCache { /// A list of tags to be added to this resource. 
Tags are comma-separated key,value pairs (e.g. Key=myKey, Value=myKeyValue. You can include multiple tags as shown following: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue. Tags on replication groups will be replicated to all nodes. @OptionalCustomCoding> public var tags: [Tag]? - /// A flag that enables in-transit encryption when set to true. This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. + /// A flag that enables in-transit encryption when set to true. This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. public let transitEncryptionEnabled: Bool? - /// A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to required to allow encrypted connections only. 
Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. This process will not trigger the replacement of the replication group. + /// A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. This process will not trigger the replacement of the replication group. public let transitEncryptionMode: TransitEncryptionMode? /// The user group to associate with the replication group. @OptionalCustomCoding> @@ -1790,7 +1790,7 @@ extension ElastiCache { /// Sets the cache usage limits for storage and ElastiCache Processing Units for the cache. public let cacheUsageLimits: CacheUsageLimits? - /// The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis only. + /// The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis OSS and Serverless Memcached only. public let dailySnapshotTime: String? /// User-provided description for the serverless cache. The default is NULL, i.e. if no description is provided then an empty string will be returned. The maximum length is 255 characters. 
public let description: String? @@ -1805,10 +1805,10 @@ extension ElastiCache { public var securityGroupIds: [String]? /// User-provided identifier for the serverless cache. This parameter is stored as a lowercase string. public let serverlessCacheName: String? - /// The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis only. + /// The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis OSS and Serverless Memcached only. @OptionalCustomCoding> public var snapshotArnsToRestore: [String]? - /// The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis only. + /// The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis OSS and Serverless Memcached only. public let snapshotRetentionLimit: Int? /// A list of the identifiers of the subnets where the VPC endpoint for the serverless cache will be deployed. All the subnetIds must belong to the same VPC. @OptionalCustomCoding> @@ -1816,7 +1816,7 @@ extension ElastiCache { /// The list of tags (key, value) pairs to be added to the serverless cache resource. Default is NULL. @OptionalCustomCoding> public var tags: [Tag]? - /// The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. Default is NULL. + /// The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL. public let userGroupId: String? public init(cacheUsageLimits: CacheUsageLimits? = nil, dailySnapshotTime: String? = nil, description: String? = nil, engine: String? = nil, kmsKeyId: String? = nil, majorEngineVersion: String? = nil, securityGroupIds: [String]? 
= nil, serverlessCacheName: String? = nil, snapshotArnsToRestore: [String]? = nil, snapshotRetentionLimit: Int? = nil, subnetIds: [String]? = nil, tags: [Tag]? = nil, userGroupId: String? = nil) { @@ -1868,13 +1868,13 @@ extension ElastiCache { public struct CreateServerlessCacheSnapshotRequest: AWSEncodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } - /// The ID of the KMS key used to encrypt the snapshot. Available for Redis only. Default: NULL + /// The ID of the KMS key used to encrypt the snapshot. Available for Redis OSS and Serverless Memcached only. Default: NULL public let kmsKeyId: String? - /// The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis only. + /// The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis OSS and Serverless Memcached only. public let serverlessCacheName: String? - /// The name for the snapshot being created. Must be unique for the customer account. Available for Redis only. Must be between 1 and 255 characters. + /// The name for the snapshot being created. Must be unique for the customer account. Available for Redis OSS and Serverless Memcached only. Must be between 1 and 255 characters. public let serverlessCacheSnapshotName: String? - /// A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis only. + /// A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only. @OptionalCustomCoding> public var tags: [Tag]? @@ -1894,7 +1894,7 @@ extension ElastiCache { } public struct CreateServerlessCacheSnapshotResponse: AWSDecodableShape { - /// The state of a serverless cache snapshot at a specific point in time, to the millisecond. Available for Redis only. + /// The state of a serverless cache snapshot at a specific point in time, to the millisecond. 
Available for Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshot: ServerlessCacheSnapshot? public init(serverlessCacheSnapshot: ServerlessCacheSnapshot? = nil) { @@ -1953,9 +1953,9 @@ extension ElastiCache { public struct CreateUserGroupMessage: AWSEncodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } - /// The current supported value is Redis. + /// The current supported value is Redis user. public let engine: String? - /// A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted. Available for Redis only. + /// A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted. Available for Redis OSS only. @OptionalCustomCoding> public var tags: [Tag]? /// The ID of the user group. @@ -2087,10 +2087,10 @@ extension ElastiCache { /// Indicates that the shard reconfiguration process begins immediately. At present, the only permitted value for this parameter is true. public let applyImmediately: Bool? - /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache for Redis will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster. + /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster. @OptionalCustomCoding> public var globalNodeGroupsToRemove: [String]? 
- /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. ElastiCache for Redis will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster. + /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. ElastiCache (Redis OSS) will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster. @OptionalCustomCoding> public var globalNodeGroupsToRetain: [String]? /// The name of the Global datastore @@ -2132,9 +2132,9 @@ extension ElastiCache { /// If True, the number of replica nodes is decreased immediately. ApplyImmediately=False is not currently supported. public let applyImmediately: Bool? - /// The number of read replica nodes you want at the completion of this operation. For Redis (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. The minimum number of replicas in a shard or replication group is: Redis (cluster mode disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Redis (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails) + /// The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. 
The minimum number of replicas in a shard or replication group is: Redis OSS (cluster mode disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails) public let newReplicaCount: Int? - /// A list of ConfigureShard objects that can be used to configure each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. + /// A list of ConfigureShard objects that can be used to configure each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. @OptionalCustomCoding> public var replicaConfiguration: [ConfigureShard]? /// A list of the node ids to remove from the replication group or node group (shard). @@ -2309,7 +2309,7 @@ extension ElastiCache { } public struct DeleteServerlessCacheRequest: AWSEncodableShape { - /// Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis only. Default: NULL, i.e. a final snapshot is not taken. + /// Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis OSS and Serverless Memcached only. Default: NULL, i.e. a final snapshot is not taken. public let finalSnapshotName: String? /// The identifier of the serverless cache to be deleted. public let serverlessCacheName: String? @@ -2339,7 +2339,7 @@ extension ElastiCache { } public struct DeleteServerlessCacheSnapshotRequest: AWSEncodableShape { - /// Idenfitier of the snapshot to be deleted. Available for Redis only. + /// Identifier of the snapshot to be deleted. Available for Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshotName: String? public init(serverlessCacheSnapshotName: String?
= nil) { @@ -2352,7 +2352,7 @@ extension ElastiCache { } public struct DeleteServerlessCacheSnapshotResponse: AWSDecodableShape { - /// The snapshot to be deleted. Available for Redis only. + /// The snapshot to be deleted. Available for Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshot: ServerlessCacheSnapshot? public init(serverlessCacheSnapshot: ServerlessCacheSnapshot? = nil) { @@ -2427,7 +2427,7 @@ extension ElastiCache { public let marker: String? /// The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20; maximum 100. public let maxRecords: Int? - /// An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis clusters. + /// An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Redis OSS clusters. public let showCacheClustersNotInReplicationGroups: Bool? /// An optional flag that can be included in the DescribeCacheCluster request to retrieve information about the individual cache nodes. public let showCacheNodeInfo: Bool? @@ -2707,7 +2707,7 @@ extension ElastiCache { } public struct DescribeReservedCacheNodesMessage: AWSEncodableShape { - /// The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// /// cache.m6g.large, /// cache.m6g.xlarge, @@ -2715,7 +2715,7 @@ extension ElastiCache { /// cache.m6g.4xlarge, /// cache.m6g.8xlarge, /// cache.m6g.12xlarge, - /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): + /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): /// cache.t4g.micro, /// 
cache.t4g.small, /// cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, @@ -2728,14 +2728,14 @@ extension ElastiCache { /// cache.r7g.4xlarge, /// cache.r7g.8xlarge, /// cache.r7g.12xlarge, - /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// cache.r6g.large, /// cache.r6g.xlarge, /// cache.r6g.2xlarge, /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later. 
+ /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration. Valid Values: 1 | 3 | 31536000 | 94608000 public let duration: String? @@ -2776,7 +2776,7 @@ extension ElastiCache { } public struct DescribeReservedCacheNodesOfferingsMessage: AWSEncodableShape { - /// The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// /// cache.m6g.large, /// cache.m6g.xlarge, @@ -2784,7 +2784,7 @@ extension ElastiCache { /// cache.m6g.4xlarge, /// cache.m6g.8xlarge, /// cache.m6g.12xlarge, - /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): + /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): /// cache.t4g.micro, /// 
cache.t4g.small, /// cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, @@ -2797,14 +2797,14 @@ extension ElastiCache { /// cache.r7g.4xlarge, /// cache.r7g.8xlarge, /// cache.r7g.12xlarge, - /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// cache.r6g.large, /// cache.r6g.xlarge, /// cache.r6g.2xlarge, /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later. 
+ /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// Duration filter value, specified in years or seconds. Use this parameter to show only reservations for a given duration. Valid Values: 1 | 3 | 31536000 | 94608000 public let duration: String? @@ -2841,15 +2841,15 @@ extension ElastiCache { } public struct DescribeServerlessCacheSnapshotsRequest: AWSEncodableShape { - /// The maximum number of records to include in the response. If more records exist than the specified max-results value, a market is included in the response so that remaining results can be retrieved. Available for Redis only.The default is 50. The Validation Constraints are a maximum of 50. + /// The maximum number of records to include in the response. If more records exist than the specified max-results value, a marker is included in the response so that remaining results can be retrieved. Available for Redis OSS and Serverless Memcached only. The default is 50. The Validation Constraints are a maximum of 50. public let maxResults: Int?
- /// An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis only. + /// An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only. public let nextToken: String? - /// The identifier of serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Redis only. + /// The identifier of serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Redis OSS and Serverless Memcached only. public let serverlessCacheName: String? - /// The identifier of the serverless cache’s snapshot. If this parameter is specified, only this snapshot is described. Available for Redis only. + /// The identifier of the serverless cache’s snapshot. If this parameter is specified, only this snapshot is described. Available for Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshotName: String? - /// The type of snapshot that is being described. Available for Redis only. + /// The type of snapshot that is being described. Available for Redis OSS and Serverless Memcached only. public let snapshotType: String? public init(maxResults: Int? = nil, nextToken: String? = nil, serverlessCacheName: String? = nil, serverlessCacheSnapshotName: String? = nil, snapshotType: String? 
= nil) { @@ -2872,9 +2872,9 @@ extension ElastiCache { public struct DescribeServerlessCacheSnapshotsResponse: AWSDecodableShape { public struct _ServerlessCacheSnapshotsEncoding: ArrayCoderProperties { public static let member = "ServerlessCacheSnapshot" } - /// An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis only. + /// An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only. public let nextToken: String? - /// The serverless caches snapshots associated with a given description request. Available for Redis only. + /// The serverless caches snapshots associated with a given description request. Available for Redis OSS and Serverless Memcached only. @OptionalCustomCoding> public var serverlessCacheSnapshots: [ServerlessCacheSnapshot]? @@ -3019,7 +3019,7 @@ extension ElastiCache { /// The cache cluster IDs @OptionalCustomCoding> public var cacheClusterIds: [String]? - /// The Elasticache engine to which the update applies. Either Redis or Memcached + /// The Elasticache engine to which the update applies. Either Redis OSS or Memcached. public let engine: String? /// An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. public let marker: String? @@ -3115,7 +3115,7 @@ extension ElastiCache { } public struct DescribeUsersMessage: AWSEncodableShape { - /// The Redis engine. + /// The Redis OSS engine. public let engine: String? 
/// Filter to determine the list of User IDs to return. @OptionalCustomCoding> @@ -3352,9 +3352,9 @@ extension ElastiCache { } public struct ExportServerlessCacheSnapshotRequest: AWSEncodableShape { - /// Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region as the snapshot. Available for Redis only. + /// Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region as the snapshot. Available for Redis OSS only. public let s3BucketName: String? - /// The identifier of the serverless cache snapshot to be exported to S3. Available for Redis only. + /// The identifier of the serverless cache snapshot to be exported to S3. Available for Redis OSS only. public let serverlessCacheSnapshotName: String? public init(s3BucketName: String? = nil, serverlessCacheSnapshotName: String? = nil) { @@ -3369,7 +3369,7 @@ extension ElastiCache { } public struct ExportServerlessCacheSnapshotResponse: AWSDecodableShape { - /// The state of a serverless cache at a specific point in time, to the millisecond. Available for Redis only. + /// The state of a serverless cache at a specific point in time, to the millisecond. Available for Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshot: ServerlessCacheSnapshot? public init(serverlessCacheSnapshot: ServerlessCacheSnapshot? = nil) { @@ -3463,17 +3463,17 @@ extension ElastiCache { /// The ARN (Amazon Resource Name) of the global replication group. public let arn: String? - /// A flag that enables encryption at rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. 
+ /// A flag that enables encryption at rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. public let atRestEncryptionEnabled: Bool? - /// A flag that enables using an AuthToken (password) when issuing Redis commands. Default: false + /// A flag that enables using an AuthToken (password) when issuing Redis OSS commands. Default: false public let authTokenEnabled: Bool? /// The cache node type of the Global datastore public let cacheNodeType: String? /// A flag that indicates whether the Global datastore is cluster enabled. public let clusterEnabled: Bool? - /// The Elasticache engine. For Redis only. + /// The Elasticache engine. For Redis OSS only. public let engine: String? - /// The Elasticache Redis engine version. + /// The Elasticache (Redis OSS) engine version. public let engineVersion: String? /// Indicates the slot configuration and global identifier for each slice group. @OptionalCustomCoding> @@ -3487,7 +3487,7 @@ extension ElastiCache { public var members: [GlobalReplicationGroupMember]? /// The status of the Global datastore public let status: String? - /// A flag that enables in-transit encryption when set to true. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. + /// A flag that enables in-transit encryption when set to true. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. public let transitEncryptionEnabled: Bool? public init(arn: String? = nil, atRestEncryptionEnabled: Bool? = nil, authTokenEnabled: Bool? = nil, cacheNodeType: String? = nil, clusterEnabled: Bool? = nil, engine: String? 
= nil, engineVersion: String? = nil, globalNodeGroups: [GlobalNodeGroup]? = nil, globalReplicationGroupDescription: String? = nil, globalReplicationGroupId: String? = nil, members: [GlobalReplicationGroupMember]? = nil, status: String? = nil, transitEncryptionEnabled: Bool? = nil) { @@ -3620,9 +3620,9 @@ extension ElastiCache { /// If True, the number of replica nodes is increased immediately. ApplyImmediately=False is not currently supported. public let applyImmediately: Bool? - /// The number of read replica nodes you want at the completion of this operation. For Redis (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. + /// The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. public let newReplicaCount: Int? - /// A list of ConfigureShard objects that can be used to configure each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. + /// A list of ConfigureShard objects that can be used to configure each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. @OptionalCustomCoding> public var replicaConfiguration: [ConfigureShard]? /// The id of the replication group to which you want to add replica nodes. @@ -3777,9 +3777,9 @@ extension ElastiCache { public let applyImmediately: Bool? /// Reserved parameter. The password used to access a password protected server. 
This parameter must be specified with the auth-token-update parameter. Password constraints: Must be only printable ASCII characters Must be at least 16 characters and no more than 128 characters in length Cannot contain any of the following characters: '/', '"', or '@', '%' For more information, see AUTH password at AUTH. public let authToken: String? - /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis AUTH + /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis OSS AUTH public let authTokenUpdateStrategy: AuthTokenUpdateStrategyType? - ///  If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// Specifies whether the new nodes in this Memcached cluster are all created in a single Availability Zone or created across multiple Availability Zones. Valid values: single-az | cross-az. This option is only supported for Memcached clusters. You cannot specify single-az if the Memcached cluster already has cache nodes in different Availability Zones. 
If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes are located in different Availability Zones. public let azMode: AZMode? @@ -3797,7 +3797,7 @@ extension ElastiCache { public var cacheSecurityGroupNames: [String]? /// The upgraded version of the cache engine to be run on the cache nodes. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version. public let engineVersion: String? - /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// Specifies the destination, format and type of the logs. @OptionalCustomCoding> @@ -3809,7 +3809,7 @@ extension ElastiCache { public let notificationTopicArn: String? /// The status of the Amazon SNS notification topic. Notifications are sent only if the status is active. Valid values: active | inactive public let notificationTopicStatus: String? - /// The number of cache nodes that the cluster should have. If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled. 
If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove. For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately). A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cluster. + /// The number of cache nodes that the cluster should have. If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled. 
If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately). A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cluster. public let numCacheNodes: Int? /// Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are: sun mon tue wed thu fri sat Example: sun:23:00-mon:01:30 public let preferredMaintenanceWindow: String? @@ -3996,11 +3996,11 @@ extension ElastiCache { public let applyImmediately: Bool? 
/// Reserved parameter. The password used to access a password protected server. This parameter must be specified with the auth-token-update-strategy parameter. Password constraints: Must be only printable ASCII characters Must be at least 16 characters and no more than 128 characters in length Cannot contain any of the following characters: '/', '"', or '@', '%' For more information, see AUTH password at AUTH. public let authToken: String? - /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis AUTH + /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis OSS AUTH public let authTokenUpdateStrategy: AuthTokenUpdateStrategyType? /// Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure. Valid values: true | false public let automaticFailoverEnabled: Bool? - ///  If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// A valid cache node type that you want to scale this replication group to. public let cacheNodeType: String? 
@@ -4009,11 +4009,11 @@ extension ElastiCache { /// A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible. This parameter can be used only with replication group containing clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC). Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default. @OptionalCustomCoding> public var cacheSecurityGroupNames: [String]? - /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. + /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. public let clusterMode: ClusterMode? /// The upgraded version of the cache engine to be run on the clusters in the replication group. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version. public let engineVersion: String? - /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. 
+ /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// Specifies the destination, format and type of the logs. @OptionalCustomCoding> @@ -4041,13 +4041,13 @@ extension ElastiCache { public var securityGroupIds: [String]? /// The number of days for which ElastiCache retains automatic node group (shard) snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted. Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. public let snapshotRetentionLimit: Int? - /// The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups. + /// The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups. public let snapshottingClusterId: String? /// The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId. Example: 05:00-09:00 If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. public let snapshotWindow: String? /// A flag that enables in-transit encryption when set to true. If you are enabling in-transit encryption for an existing cluster, you must also set TransitEncryptionMode to preferred. public let transitEncryptionEnabled: Bool? - /// A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. 
You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can set the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. + /// A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can set the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. public let transitEncryptionMode: TransitEncryptionMode? /// The ID of the user group you are associating with the replication group. @OptionalCustomCoding> @@ -4186,13 +4186,13 @@ extension ElastiCache { public let applyImmediately: Bool? /// The number of node groups (shards) that results from the modification of the shard configuration. public let nodeGroupCount: Int? - /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. 
+ /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. @OptionalCustomCoding> public var nodeGroupsToRemove: [String]? - /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache for Redis will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster. + /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster. @OptionalCustomCoding> public var nodeGroupsToRetain: [String]? - /// The name of the Redis (cluster mode enabled) cluster (replication group) on which the shards are to be configured. + /// The name of the Redis OSS (cluster mode enabled) cluster (replication group) on which the shards are to be configured. public let replicationGroupId: String? /// Specifies the preferred availability zones for each node group in the cluster. If the value of NodeGroupCount is greater than the current number of node groups (shards), you can use this parameter to specify the preferred availability zones of the cluster's shards. If you omit this parameter ElastiCache selects availability zones for you. You can specify this parameter only if the value of NodeGroupCount is greater than the current number of node groups (shards). 
@OptionalCustomCoding> @@ -4250,20 +4250,20 @@ extension ElastiCache { /// Modify the cache usage limit for the serverless cache. public let cacheUsageLimits: CacheUsageLimits? - /// The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis only. The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed. + /// The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis OSS and Serverless Memcached only. The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed. public let dailySnapshotTime: String? /// User provided description for the serverless cache. Default = NULL, i.e. the existing description is not removed/modified. The description has a maximum length of 255 characters. public let description: String? - /// The identifier of the UserGroup to be removed from association with the Redis serverless cache. Available for Redis only. Default is NULL. + /// The identifier of the UserGroup to be removed from association with the Redis OSS serverless cache. Available for Redis OSS only. Default is NULL. public let removeUserGroup: Bool? /// The new list of VPC security groups to be associated with the serverless cache. Populating this list means the current VPC security groups will be removed. This security group is used to authorize traffic access for the VPC end-point (private-link). Default = NULL - the existing list of VPC security groups is not removed. @OptionalCustomCoding> public var securityGroupIds: [String]? /// User-provided identifier for the serverless cache to be modified. public let serverlessCacheName: String? - /// The number of days for which Elasticache retains automatic snapshots before deleting them. Available for Redis only. Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 days. 
+ /// The number of days for which Elasticache retains automatic snapshots before deleting them. Available for Redis OSS and Serverless Memcached only. Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 days. public let snapshotRetentionLimit: Int? - /// The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. Default is NULL - the existing UserGroup is not removed. + /// The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL - the existing UserGroup is not removed. public let userGroupId: String? public init(cacheUsageLimits: CacheUsageLimits? = nil, dailySnapshotTime: String? = nil, description: String? = nil, removeUserGroup: Bool? = nil, securityGroupIds: [String]? = nil, serverlessCacheName: String? = nil, snapshotRetentionLimit: Int? = nil, userGroupId: String? = nil) { @@ -4384,7 +4384,7 @@ extension ElastiCache { public struct NodeGroup: AWSDecodableShape { public struct _NodeGroupMembersEncoding: ArrayCoderProperties { public static let member = "NodeGroupMember" } - /// The identifier for the node group (shard). A Redis (cluster mode disabled) replication group contains only 1 node group; therefore, the node group ID is 0001. A Redis (cluster mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090. Optionally, the user can provide the id for a node group. + /// The identifier for the node group (shard). A Redis OSS (cluster mode disabled) replication group contains only 1 node group; therefore, the node group ID is 0001. A Redis OSS (cluster mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090. Optionally, the user can provide the id for a node group. public let nodeGroupId: String? /// A list containing information about individual nodes within the node group (shard). 
@OptionalCustomCoding> @@ -4421,7 +4421,7 @@ extension ElastiCache { public struct _ReplicaAvailabilityZonesEncoding: ArrayCoderProperties { public static let member = "AvailabilityZone" } public struct _ReplicaOutpostArnsEncoding: ArrayCoderProperties { public static let member = "OutpostArn" } - /// Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to. + /// Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to. public let nodeGroupId: String? /// The Availability Zone where the primary node of this node group (shard) is launched. public let primaryAvailabilityZone: String? @@ -4470,13 +4470,13 @@ extension ElastiCache { public let cacheClusterId: String? /// The ID of the node within its cluster. A node ID is a numeric identifier (0001, 0002, etc.). public let cacheNodeId: String? - /// The role that is currently assigned to the node - primary or replica. This member is only applicable for Redis (cluster mode disabled) replication groups. + /// The role that is currently assigned to the node - primary or replica. This member is only applicable for Redis OSS (cluster mode disabled) replication groups. public let currentRole: String? /// The name of the Availability Zone in which the node is located. public let preferredAvailabilityZone: String? /// The outpost ARN of the node group member. public let preferredOutpostArn: String? - /// The information required for client programs to connect to a node for read operations. The read endpoint is only applicable on Redis (cluster mode disabled) clusters. + /// The information required for client programs to connect to a node for read operations. The read endpoint is only applicable on Redis OSS (cluster mode disabled) clusters. public let readEndpoint: Endpoint? public init(cacheClusterId: String? = nil, cacheNodeId: String? = nil, currentRole: String? 
= nil, preferredAvailabilityZone: String? = nil, preferredOutpostArn: String? = nil, readEndpoint: Endpoint? = nil) { @@ -4719,7 +4719,7 @@ extension ElastiCache { /// The log delivery configurations being modified @OptionalCustomCoding> public var logDeliveryConfigurations: [PendingLogDeliveryConfiguration]? - /// The new number of cache nodes for the cluster. For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. + /// The new number of cache nodes for the cluster. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. public let numCacheNodes: Int? /// A flag that enables in-transit encryption when set to true. public let transitEncryptionEnabled: Bool? @@ -4756,7 +4756,7 @@ extension ElastiCache { public let replicationGroupId: String? /// The unique ID of the service update public let serviceUpdateName: String? - /// The status of the update action on the Redis cluster + /// The status of the update action on the Redis OSS cluster public let updateActionStatus: UpdateActionStatus? public init(cacheClusterId: String? = nil, replicationGroupId: String? = nil, serviceUpdateName: String? = nil, updateActionStatus: UpdateActionStatus? = nil) { @@ -4948,21 +4948,21 @@ extension ElastiCache { /// The ARN (Amazon Resource Name) of the replication group. public let arn: String? - /// A flag that enables encryption at-rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable encryption at-rest on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false + /// A flag that enables encryption at-rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. 
To enable encryption at-rest on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false public let atRestEncryptionEnabled: Bool? - /// A flag that enables using an AuthToken (password) when issuing Redis commands. Default: false + /// A flag that enables using an AuthToken (password) when issuing Redis OSS commands. Default: false public let authTokenEnabled: Bool? /// The date the auth token was last modified public let authTokenLastModifiedDate: Date? - /// Indicates the status of automatic failover for this Redis replication group. + /// Indicates the status of automatic failover for this Redis OSS replication group. public let automaticFailover: AutomaticFailoverStatus? - /// If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + /// If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// The name of the compute and memory capacity node type for each node in the replication group. public let cacheNodeType: String? /// A flag indicating whether or not this replication group is cluster enabled; i.e., whether its data can be partitioned across multiple shards (API/CLI: node groups). Valid values: true | false public let clusterEnabled: Bool? - /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. 
After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. + /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. public let clusterMode: ClusterMode? /// The configuration endpoint for this replication group. Use the configuration endpoint to connect to this replication group. public let configurationEndpoint: Endpoint? @@ -4972,7 +4972,7 @@ extension ElastiCache { public let description: String? /// The name of the Global datastore and role of this replication group in the Global datastore. public let globalReplicationGroupInfo: GlobalReplicationGroupInfo? - /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// The ID of the KMS key used to encrypt the disk in the cluster. public let kmsKeyId: String? @@ -4987,9 +4987,9 @@ extension ElastiCache { public var memberClustersOutpostArns: [String]? /// A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ public let multiAZ: MultiAZStatus? - /// Must be either ipv4 | ipv6 | dual_stack. 
IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. public let networkType: NetworkType? - /// A list of node groups in this replication group. For Redis (cluster mode disabled) replication groups, this is a single-element list. For Redis (cluster mode enabled) replication groups, the list contains an entry for each node group (shard). + /// A list of node groups in this replication group. For Redis OSS (cluster mode disabled) replication groups, this is a single-element list. For Redis OSS (cluster mode enabled) replication groups, the list contains an entry for each node group (shard). @OptionalCustomCoding> public var nodeGroups: [NodeGroup]? /// A group of settings to be applied to the replication group, either immediately or during the next maintenance window. @@ -5006,7 +5006,7 @@ extension ElastiCache { public let snapshotWindow: String? /// The current state of this replication group - creating, available, modifying, deleting, create-failed, snapshotting. public let status: String? - /// A flag that enables in-transit encryption when set to true. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false + /// A flag that enables in-transit encryption when set to true. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false public let transitEncryptionEnabled: Bool? /// A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. public let transitEncryptionMode: TransitEncryptionMode? 
@@ -5106,9 +5106,9 @@ extension ElastiCache { public struct ReplicationGroupPendingModifiedValues: AWSDecodableShape { /// The auth token status public let authTokenStatus: AuthTokenUpdateStatus? - /// Indicates the status of automatic failover for this Redis replication group. + /// Indicates the status of automatic failover for this Redis OSS replication group. public let automaticFailoverStatus: PendingAutomaticFailoverStatus? - /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. + /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. public let clusterMode: ClusterMode? /// The log delivery configurations being modified @OptionalCustomCoding> @@ -5154,7 +5154,7 @@ extension ElastiCache { /// The number of cache nodes that have been reserved. public let cacheNodeCount: Int? - /// The cache node type for the reserved cache nodes. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// The cache node type for the reserved cache nodes. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// /// cache.m6g.large, /// cache.m6g.xlarge, @@ -5162,7 +5162,7 @@ extension ElastiCache { /// cache.m6g.4xlarge, /// cache.m6g.8xlarge, /// cache.m6g.12xlarge, - /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): + /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): /// cache.t4g.micro, /// cache.t4g.small, /// cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, 
cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, @@ -5175,14 +5175,14 @@ extension ElastiCache { /// cache.r7g.4xlarge, /// cache.r7g.8xlarge, /// cache.r7g.12xlarge, - /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// cache.r6g.large, /// cache.r6g.xlarge, /// cache.r6g.2xlarge, /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later. 
+ /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// The duration of the reservation in seconds. public let duration: Int? @@ -5264,7 +5264,7 @@ extension ElastiCache { public struct ReservedCacheNodesOffering: AWSDecodableShape { public struct _RecurringChargesEncoding: ArrayCoderProperties { public static let member = "RecurringCharge" } - /// The cache node type for the reserved cache node. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// The cache node type for the reserved cache node. 
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// /// cache.m6g.large, /// cache.m6g.xlarge, @@ -5272,7 +5272,7 @@ extension ElastiCache { /// cache.m6g.4xlarge, /// cache.m6g.8xlarge, /// cache.m6g.12xlarge, - /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): + /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): /// cache.t4g.micro, /// cache.t4g.small, /// cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
T1 node types: cache.t1.micro M1 node types: cache.m1.small, @@ -5285,14 +5285,14 @@ extension ElastiCache { /// cache.r7g.4xlarge, /// cache.r7g.8xlarge, /// cache.r7g.12xlarge, - /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// cache.r6g.large, /// cache.r6g.xlarge, /// cache.r6g.2xlarge, /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. 
Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// The duration of the offering. in seconds. public let duration: Int? @@ -5380,7 +5380,7 @@ extension ElastiCache { public struct ReshardingConfiguration: AWSEncodableShape { public struct _PreferredAvailabilityZonesEncoding: ArrayCoderProperties { public static let member = "AvailabilityZone" } - /// Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to. + /// Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to. public let nodeGroupId: String? /// A list of preferred availability zones for the nodes in this cluster. @OptionalCustomCoding> @@ -5476,7 +5476,7 @@ extension ElastiCache { public let cacheUsageLimits: CacheUsageLimits? /// When the serverless cache was created. public let createTime: Date? - /// The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis only. + /// The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only. public let dailySnapshotTime: String? 
/// A description of the serverless cache. public let description: String? @@ -5495,14 +5495,14 @@ extension ElastiCache { public var securityGroupIds: [String]? /// The unique identifier of the serverless cache. public let serverlessCacheName: String? - /// The current setting for the number of serverless cache snapshots the system will retain. Available for Redis only. + /// The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only. public let snapshotRetentionLimit: Int? /// The current status of the serverless cache. The allowed values are CREATING, AVAILABLE, DELETING, CREATE-FAILED and MODIFYING. public let status: String? /// If no subnet IDs are given and your VPC is in us-west-1, then ElastiCache will select 2 default subnets across AZs in your VPC. For all other Regions, if no subnet IDs are given then ElastiCache will select 3 default subnets across AZs in your default VPC. @OptionalCustomCoding> public var subnetIds: [String]? - /// The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL. + /// The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL. public let userGroupId: String? public init(arn: String? = nil, cacheUsageLimits: CacheUsageLimits? = nil, createTime: Date? = nil, dailySnapshotTime: String? = nil, description: String? = nil, endpoint: Endpoint? = nil, engine: String? = nil, fullEngineVersion: String? = nil, kmsKeyId: String? = nil, majorEngineVersion: String? = nil, readerEndpoint: Endpoint? = nil, securityGroupIds: [String]? = nil, serverlessCacheName: String? = nil, snapshotRetentionLimit: Int? = nil, status: String? = nil, subnetIds: [String]? = nil, userGroupId: String? 
= nil) { @@ -5568,23 +5568,23 @@ extension ElastiCache { } public struct ServerlessCacheSnapshot: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Redis only. + /// The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only. public let arn: String? - /// The total size of a serverless cache snapshot, in bytes. Available for Redis only. + /// The total size of a serverless cache snapshot, in bytes. Available for Redis OSS and Serverless Memcached only. public let bytesUsedForCache: String? - /// The date and time that the source serverless cache's metadata and cache data set was obtained for the snapshot. Available for Redis only. + /// The date and time that the source serverless cache's metadata and cache data set was obtained for the snapshot. Available for Redis OSS and Serverless Memcached only. public let createTime: Date? - /// The time that the serverless cache snapshot will expire. Available for Redis only. + /// The time that the serverless cache snapshot will expire. Available for Redis OSS and Serverless Memcached only. public let expiryTime: Date? - /// The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Redis only. + /// The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only. public let kmsKeyId: String? - /// The configuration of the serverless cache, at the time the snapshot was taken. Available for Redis only. + /// The configuration of the serverless cache, at the time the snapshot was taken. Available for Redis OSS and Serverless Memcached only. public let serverlessCacheConfiguration: ServerlessCacheConfiguration? - /// The identifier of a serverless cache snapshot. Available for Redis only. + /// The identifier of a serverless cache snapshot. 
Available for Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshotName: String? - /// The type of snapshot of serverless cache. Available for Redis only. + /// The type of snapshot of serverless cache. Available for Redis OSS and Serverless Memcached only. public let snapshotType: String? - /// The current status of the serverless cache. Available for Redis only. + /// The current status of the serverless cache. Available for Redis OSS and Serverless Memcached only. public let status: String? public init(arn: String? = nil, bytesUsedForCache: String? = nil, createTime: Date? = nil, expiryTime: Date? = nil, kmsKeyId: String? = nil, serverlessCacheConfiguration: ServerlessCacheConfiguration? = nil, serverlessCacheSnapshotName: String? = nil, snapshotType: String? = nil, status: String? = nil) { @@ -5615,9 +5615,9 @@ extension ElastiCache { public struct ServiceUpdate: AWSDecodableShape { /// Indicates whether the service update will be automatically applied once the recommended apply-by date has expired. public let autoUpdateAfterRecommendedApplyByDate: Bool? - /// The Elasticache engine to which the update applies. Either Redis or Memcached + /// The Elasticache engine to which the update applies. Either Redis OSS or Memcached. public let engine: String? - /// The Elasticache engine version to which the update applies. Either Redis or Memcached engine version + /// The Elasticache engine version to which the update applies. Either Redis OSS or Memcached engine version. public let engineVersion: String? /// The estimated length of time the service update will take public let estimatedUpdateTime: String? @@ -5707,15 +5707,15 @@ extension ElastiCache { /// The ARN (Amazon Resource Name) of the snapshot. public let arn: String? - /// Indicates the status of automatic failover for the source Redis replication group. + /// Indicates the status of automatic failover for the source Redis OSS replication group. 
public let automaticFailover: AutomaticFailoverStatus? - ///  If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// The date and time when the source cluster was created. public let cacheClusterCreateTime: Date? /// The user-supplied identifier of the source cluster. public let cacheClusterId: String? - /// The name of the compute and memory capacity node type for the source cluster. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// The name of the compute and memory capacity node type for the source cluster. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// /// cache.m6g.large, /// cache.m6g.xlarge, @@ -5723,7 +5723,7 @@ extension ElastiCache { /// cache.m6g.4xlarge, /// cache.m6g.8xlarge, /// cache.m6g.12xlarge, - /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): + /// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): /// cache.t4g.micro, /// cache.t4g.small, /// cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
T1 node types: cache.t1.micro M1 node types: cache.m1.small, @@ -5736,14 +5736,14 @@ extension ElastiCache { /// cache.r7g.4xlarge, /// cache.r7g.8xlarge, /// cache.r7g.12xlarge, - /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + /// cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// cache.r6g.large, /// cache.r6g.xlarge, /// cache.r6g.2xlarge, /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. 
Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// The cache parameter group that is associated with the source cluster. public let cacheParameterGroupName: String? @@ -5760,7 +5760,7 @@ extension ElastiCache { /// A list of the cache nodes in the source cluster. @OptionalCustomCoding> public var nodeSnapshots: [NodeSnapshot]? - /// The number of cache nodes in the source cluster. For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. + /// The number of cache nodes in the source cluster. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. public let numCacheNodes: Int? /// The number of node groups (shards) in this snapshot. When restoring from a snapshot, the number of node groups (shards) in the snapshot and in the restored replication group must be the same. public let numNodeGroups: Int? @@ -5855,7 +5855,7 @@ extension ElastiCache { } public struct StartMigrationMessage: AWSEncodableShape { - /// List of endpoints from which data should be migrated. For Redis (cluster mode disabled), list should have only one element. + /// List of endpoints from which data should be migrated. For Redis OSS (cluster mode disabled), list should have only one element. 
@OptionalCustomCoding> public var customerNodeEndpointList: [CustomerNodeEndpoint]? /// The ID of the replication group to which data should be migrated. @@ -5891,7 +5891,7 @@ extension ElastiCache { public let subnetIdentifier: String? /// The outpost ARN of the subnet. public let subnetOutpost: SubnetOutpost? - /// Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. @OptionalCustomCoding> public var supportedNetworkTypes: [NetworkType]? @@ -6076,7 +6076,7 @@ extension ElastiCache { /// The status of the service update on the cache node @OptionalCustomCoding> public var cacheNodeUpdateStatus: [CacheNodeUpdateStatus]? - /// The Elasticache engine to which the update applies. Either Redis or Memcached + /// The Elasticache engine to which the update applies. Either Redis OSS or Memcached. public let engine: String? /// The estimated length of time for the update to complete public let estimatedUpdateTime: String? @@ -6200,7 +6200,7 @@ extension ElastiCache { public let authentication: Authentication? /// The current supported value is Redis. public let engine: String? - /// The minimum engine version required, which is Redis 6.0 + /// The minimum engine version required, which is Redis OSS 6.0 public let minimumEngineVersion: String? /// Indicates the user status. Can be "active", "modifying" or "deleting". public let status: String? @@ -6240,16 +6240,16 @@ extension ElastiCache { public struct UserGroup: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the user group. public let arn: String? - /// The current supported value is Redis. + /// The current supported value is Redis user. public let engine: String? 
- /// The minimum engine version required, which is Redis 6.0 + /// The minimum engine version required, which is Redis OSS 6.0 public let minimumEngineVersion: String? /// A list of updates being applied to the user group. public let pendingChanges: UserGroupPendingChanges? /// A list of replication groups that the user group can access. @OptionalCustomCoding> public var replicationGroups: [String]? - /// Indicates which serverless caches the specified user group is associated with. Available for Redis only. + /// Indicates which serverless caches the specified user group is associated with. Available for Redis OSS and Serverless Memcached only. @OptionalCustomCoding> public var serverlessCaches: [String]? /// Indicates user group status. Can be "creating", "active", "modifying", "deleting". @@ -6491,7 +6491,7 @@ public struct ElastiCacheErrorType: AWSErrorType { public static var invalidParameterValueException: Self { .init(.invalidParameterValueException) } /// The requested replication group is not in the available state. public static var invalidReplicationGroupStateFault: Self { .init(.invalidReplicationGroupStateFault) } - /// The state of the serverless cache snapshot was not received. Available for Redis only. + /// The state of the serverless cache snapshot was not received. Available for Redis OSS and Serverless Memcached only. public static var invalidServerlessCacheSnapshotStateFault: Self { .init(.invalidServerlessCacheSnapshotStateFault) } /// The account for these credentials is not currently active. public static var invalidServerlessCacheStateFault: Self { .init(.invalidServerlessCacheStateFault) } @@ -6537,11 +6537,11 @@ public struct ElastiCacheErrorType: AWSErrorType { public static var serverlessCacheNotFoundFault: Self { .init(.serverlessCacheNotFoundFault) } /// The number of serverless caches exceeds the customer quota. 
public static var serverlessCacheQuotaForCustomerExceededFault: Self { .init(.serverlessCacheQuotaForCustomerExceededFault) } - /// A serverless cache snapshot with this name already exists. Available for Redis only. + /// A serverless cache snapshot with this name already exists. Available for Redis OSS and Serverless Memcached only. public static var serverlessCacheSnapshotAlreadyExistsFault: Self { .init(.serverlessCacheSnapshotAlreadyExistsFault) } - /// This serverless cache snapshot could not be found or does not exist. Available for Redis only. + /// This serverless cache snapshot could not be found or does not exist. Available for Redis OSS and Serverless Memcached only. public static var serverlessCacheSnapshotNotFoundFault: Self { .init(.serverlessCacheSnapshotNotFoundFault) } - /// The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Redis only. + /// The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Redis OSS and Serverless Memcached only. public static var serverlessCacheSnapshotQuotaExceededFault: Self { .init(.serverlessCacheSnapshotQuotaExceededFault) } /// The specified service linked role (SLR) was not found. public static var serviceLinkedRoleNotFoundFault: Self { .init(.serviceLinkedRoleNotFoundFault) } @@ -6549,7 +6549,7 @@ public struct ElastiCacheErrorType: AWSErrorType { public static var serviceUpdateNotFoundFault: Self { .init(.serviceUpdateNotFoundFault) } /// You already have a snapshot with the given name. public static var snapshotAlreadyExistsFault: Self { .init(.snapshotAlreadyExistsFault) } - /// You attempted one of the following operations: Creating a snapshot of a Redis cluster running on a cache.t1.micro cache node. Creating a snapshot of a cluster that is running Memcached rather than Redis. Neither of these are supported by ElastiCache. 
+ /// You attempted one of the following operations: Creating a snapshot of a Redis OSS cluster running on a cache.t1.micro cache node. Creating a snapshot of a cluster that is running Memcached rather than Redis OSS. Neither of these are supported by ElastiCache. public static var snapshotFeatureNotSupportedFault: Self { .init(.snapshotFeatureNotSupportedFault) } /// The requested snapshot name does not refer to an existing snapshot. public static var snapshotNotFoundFault: Self { .init(.snapshotNotFoundFault) } diff --git a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api.swift b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api.swift index 3f88e5f416..ff6b623e44 100644 --- a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api.swift +++ b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api.swift @@ -229,6 +229,19 @@ public struct ElasticLoadBalancingV2: AWSService { ) } + /// Deletes a shared trust store association. + @Sendable + public func deleteSharedTrustStoreAssociation(_ input: DeleteSharedTrustStoreAssociationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteSharedTrustStoreAssociationOutput { + return try await self.client.execute( + operation: "DeleteSharedTrustStoreAssociation", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes the specified target group. You can delete a target group if it is not referenced by any actions. Deleting a target group also deletes any associated health checks. Deleting a target group does not affect its registered targets. For example, any EC2 instances continue to run until you stop or terminate them. 
@Sendable public func deleteTargetGroup(_ input: DeleteTargetGroupInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTargetGroupOutput { @@ -424,7 +437,7 @@ public struct ElasticLoadBalancingV2: AWSService { ) } - /// Describes the revocation files in use by the specified trust store arn, or revocation ID. + /// Describes the revocation files in use by the specified trust store or revocation files. @Sendable public func describeTrustStoreRevocations(_ input: DescribeTrustStoreRevocationsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeTrustStoreRevocationsOutput { return try await self.client.execute( @@ -437,7 +450,7 @@ public struct ElasticLoadBalancingV2: AWSService { ) } - /// Describes all trust stores for a given account by trust store arn’s or name. + /// Describes all trust stores for the specified account. @Sendable public func describeTrustStores(_ input: DescribeTrustStoresInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeTrustStoresOutput { return try await self.client.execute( @@ -450,6 +463,19 @@ public struct ElasticLoadBalancingV2: AWSService { ) } + /// Retrieves the resource policy for a specified resource. + @Sendable + public func getResourcePolicy(_ input: GetResourcePolicyInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetResourcePolicyOutput { + return try await self.client.execute( + operation: "GetResourcePolicy", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves the ca certificate bundle. This action returns a pre-signed S3 URI which is active for ten minutes. 
@Sendable public func getTrustStoreCaCertificatesBundle(_ input: GetTrustStoreCaCertificatesBundleInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTrustStoreCaCertificatesBundleOutput { @@ -541,7 +567,7 @@ public struct ElasticLoadBalancingV2: AWSService { ) } - /// Update the ca certificate bundle for a given trust store. + /// Update the ca certificate bundle for the specified trust store. @Sendable public func modifyTrustStore(_ input: ModifyTrustStoreInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ModifyTrustStoreOutput { return try await self.client.execute( @@ -748,7 +774,7 @@ extension ElasticLoadBalancingV2 { ) } - /// Describes the revocation files in use by the specified trust store arn, or revocation ID. + /// Describes the revocation files in use by the specified trust store or revocation files. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -767,7 +793,7 @@ extension ElasticLoadBalancingV2 { ) } - /// Describes all trust stores for a given account by trust store arn’s or name. + /// Describes all trust stores for the specified account. /// Return PaginatorSequence for operation. 
/// /// - Parameters: diff --git a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift index 7f4d58aa19..d504ac9f04 100644 --- a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift +++ b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift @@ -164,6 +164,12 @@ extension ElasticLoadBalancingV2 { public var description: String { return self.rawValue } } + public enum TrustStoreAssociationStatusEnum: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "active" + case removed = "removed" + public var description: String { return self.rawValue } + } + public enum TrustStoreStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "ACTIVE" case creating = "CREATING" @@ -915,6 +921,27 @@ extension ElasticLoadBalancingV2 { public init() {} } + public struct DeleteSharedTrustStoreAssociationInput: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String? + /// The Amazon Resource Name (ARN) of the trust store. + public let trustStoreArn: String? + + public init(resourceArn: String? = nil, trustStoreArn: String? = nil) { + self.resourceArn = resourceArn + self.trustStoreArn = trustStoreArn + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + case trustStoreArn = "TrustStoreArn" + } + } + + public struct DeleteSharedTrustStoreAssociationOutput: AWSDecodableShape { + public init() {} + } + public struct DeleteTargetGroupInput: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the target group. public let targetGroupArn: String? @@ -1395,7 +1422,7 @@ extension ElasticLoadBalancingV2 { } public struct DescribeTargetHealthInput: AWSEncodableShape { - /// Used to inclue anomaly detection information. 
+ /// Used to include anomaly detection information. @OptionalCustomCoding> public var include: [DescribeTargetHealthInputIncludeEnum]? /// The Amazon Resource Name (ARN) of the target group. @@ -1655,6 +1682,32 @@ extension ElasticLoadBalancingV2 { } } + public struct GetResourcePolicyInput: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String? + + public init(resourceArn: String? = nil) { + self.resourceArn = resourceArn + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + } + } + + public struct GetResourcePolicyOutput: AWSDecodableShape { + /// The content of the resource policy. + public let policy: String? + + public init(policy: String? = nil) { + self.policy = policy + } + + private enum CodingKeys: String, CodingKey { + case policy = "Policy" + } + } + public struct GetTrustStoreCaCertificatesBundleInput: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the trust store. public let trustStoreArn: String? @@ -2282,17 +2335,21 @@ extension ElasticLoadBalancingV2 { public let mode: String? /// The Amazon Resource Name (ARN) of the trust store. public let trustStoreArn: String? + /// Indicates a shared trust stores association status. + public let trustStoreAssociationStatus: TrustStoreAssociationStatusEnum? - public init(ignoreClientCertificateExpiry: Bool? = nil, mode: String? = nil, trustStoreArn: String? = nil) { + public init(ignoreClientCertificateExpiry: Bool? = nil, mode: String? = nil, trustStoreArn: String? = nil, trustStoreAssociationStatus: TrustStoreAssociationStatusEnum? 
= nil) { self.ignoreClientCertificateExpiry = ignoreClientCertificateExpiry self.mode = mode self.trustStoreArn = trustStoreArn + self.trustStoreAssociationStatus = trustStoreAssociationStatus } private enum CodingKeys: String, CodingKey { case ignoreClientCertificateExpiry = "IgnoreClientCertificateExpiry" case mode = "Mode" case trustStoreArn = "TrustStoreArn" + case trustStoreAssociationStatus = "TrustStoreAssociationStatus" } } @@ -2611,7 +2668,7 @@ extension ElasticLoadBalancingV2 { } public struct SetIpAddressTypeInput: AWSEncodableShape { - /// Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). + /// Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). Note: Application Load Balancer authentication only supports IPv4 addresses when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer cannot complete the authentication process, resulting in HTTP 500 errors. [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). 
You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). public let ipAddressType: IpAddressType? /// The Amazon Resource Name (ARN) of the load balancer. public let loadBalancerArn: String? @@ -3157,6 +3214,7 @@ public struct ElasticLoadBalancingV2ErrorType: AWSErrorType { case availabilityZoneNotSupportedException = "AvailabilityZoneNotSupported" case caCertificatesBundleNotFoundException = "CaCertificatesBundleNotFound" case certificateNotFoundException = "CertificateNotFound" + case deleteAssociationSameAccountException = "DeleteAssociationSameAccount" case duplicateListenerException = "DuplicateListener" case duplicateLoadBalancerNameException = "DuplicateLoadBalancerName" case duplicateTagKeysException = "DuplicateTagKeys" @@ -3177,6 +3235,7 @@ public struct ElasticLoadBalancingV2ErrorType: AWSErrorType { case operationNotPermittedException = "OperationNotPermitted" case priorityInUseException = "PriorityInUse" case resourceInUseException = "ResourceInUse" + case resourceNotFoundException = "ResourceNotFound" case revocationContentNotFoundException = "RevocationContentNotFound" case revocationIdNotFoundException = "RevocationIdNotFound" case ruleNotFoundException = "RuleNotFound" @@ -3196,6 +3255,7 @@ public struct ElasticLoadBalancingV2ErrorType: AWSErrorType { case tooManyTrustStoreRevocationEntriesException = "TooManyTrustStoreRevocationEntries" case tooManyTrustStoresException = "TooManyTrustStores" case tooManyUniqueTargetGroupsPerLoadBalancerException = "TooManyUniqueTargetGroupsPerLoadBalancer" + case trustStoreAssociationNotFoundException = "AssociationNotFound" case trustStoreInUseException = "TrustStoreInUse" case trustStoreNotFoundException = "TrustStoreNotFound" case trustStoreNotReadyException = "TrustStoreNotReady" @@ -3230,6 +3290,8 @@ public struct 
ElasticLoadBalancingV2ErrorType: AWSErrorType { public static var caCertificatesBundleNotFoundException: Self { .init(.caCertificatesBundleNotFoundException) } /// The specified certificate does not exist. public static var certificateNotFoundException: Self { .init(.certificateNotFoundException) } + /// The specified association cannot be within the same account. + public static var deleteAssociationSameAccountException: Self { .init(.deleteAssociationSameAccountException) } /// A listener with the specified port already exists. public static var duplicateListenerException: Self { .init(.duplicateListenerException) } /// A load balancer with the specified name already exists. @@ -3270,6 +3332,8 @@ public struct ElasticLoadBalancingV2ErrorType: AWSErrorType { public static var priorityInUseException: Self { .init(.priorityInUseException) } /// A specified resource is in use. public static var resourceInUseException: Self { .init(.resourceInUseException) } + /// The specified resource does not exist. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } /// The specified revocation file does not exist. public static var revocationContentNotFoundException: Self { .init(.revocationContentNotFoundException) } /// The specified revocation ID does not exist. @@ -3308,6 +3372,8 @@ public struct ElasticLoadBalancingV2ErrorType: AWSErrorType { public static var tooManyTrustStoresException: Self { .init(.tooManyTrustStoresException) } /// You've reached the limit on the number of unique target groups per load balancer across all listeners. If a target group is used by multiple actions for a load balancer, it is counted as only one use. public static var tooManyUniqueTargetGroupsPerLoadBalancerException: Self { .init(.tooManyUniqueTargetGroupsPerLoadBalancerException) } + /// The specified association does not exist. 
+ public static var trustStoreAssociationNotFoundException: Self { .init(.trustStoreAssociationNotFoundException) } /// The specified trust store is currently in use. public static var trustStoreInUseException: Self { .init(.trustStoreInUseException) } /// The specified trust store does not exist. diff --git a/Sources/Soto/Services/EntityResolution/EntityResolution_shapes.swift b/Sources/Soto/Services/EntityResolution/EntityResolution_shapes.swift index 9cec6e8c9a..f071b98d69 100644 --- a/Sources/Soto/Services/EntityResolution/EntityResolution_shapes.swift +++ b/Sources/Soto/Services/EntityResolution/EntityResolution_shapes.swift @@ -46,6 +46,13 @@ extension EntityResolution { public enum IdMappingType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case provider = "PROVIDER" + case ruleBased = "RULE_BASED" + public var description: String { return self.rawValue } + } + + public enum IdMappingWorkflowRuleDefinitionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case source = "SOURCE" + case target = "TARGET" public var description: String { return self.rawValue } } @@ -68,6 +75,18 @@ extension EntityResolution { public var description: String { return self.rawValue } } + public enum MatchPurpose: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case identifierGeneration = "IDENTIFIER_GENERATION" + case indexing = "INDEXING" + public var description: String { return self.rawValue } + } + + public enum RecordMatchingModel: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case manySourceToOneTarget = "MANY_SOURCE_TO_ONE_TARGET" + case oneSourceToOneTarget = "ONE_SOURCE_TO_ONE_TARGET" + public var description: String { return self.rawValue } + } + public enum ResolutionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case mlMatching = "ML_MATCHING" case provider = "PROVIDER" @@ -120,7 +139,7 @@ extension 
EntityResolution { public let arn: String /// A set of condition keys that you can use in key policies. public let condition: String? - /// Determines whether the permissions specified in the policy are to be allowed (Allow) or denied (Deny). + /// Determines whether the permissions specified in the policy are to be allowed (Allow) or denied (Deny). If you set the value of the effect parameter to Deny for the AddPolicyStatement operation, you must also set the value of the effect parameter in the policy to Deny for the PutPolicy operation. public let effect: StatementEffect /// The Amazon Web Services service or Amazon Web Services account that can access the resource defined as ARN. public let principal: [String] @@ -223,7 +242,7 @@ extension EntityResolution { try self.uniqueIds.forEach { try validate($0, name: "uniqueIds[]", parent: name, max: 760) try validate($0, name: "uniqueIds[]", parent: name, min: 1) - try validate($0, name: "uniqueIds[]", parent: name, pattern: "^[a-zA-Z_0-9-,]*$") + try validate($0, name: "uniqueIds[]", parent: name, pattern: "^[a-zA-Z_0-9-+=/,]*$") } try self.validate(self.workflowName, name: "workflowName", parent: name, max: 255) try self.validate(self.workflowName, name: "workflowName", parent: name, min: 1) @@ -261,20 +280,20 @@ extension EntityResolution { public struct CreateIdMappingWorkflowInput: AWSEncodableShape { /// A description of the workflow. public let description: String? - /// An object which defines the idMappingType and the providerProperties. + /// An object which defines the ID mapping technique and any additional configurations. public let idMappingTechniques: IdMappingTechniques /// A list of InputSource objects, which have the fields InputSourceARN and SchemaName. public let inputSourceConfig: [IdMappingWorkflowInputSource] /// A list of IdMappingWorkflowOutputSource objects, each of which contains fields OutputS3Path and Output. public let outputSourceConfig: [IdMappingWorkflowOutputSource]? 
/// The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes this role to create resources on your behalf as part of workflow execution. - public let roleArn: String + public let roleArn: String? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? /// The name of the workflow. There can't be multiple IdMappingWorkflows with the same name. public let workflowName: String - public init(description: String? = nil, idMappingTechniques: IdMappingTechniques, inputSourceConfig: [IdMappingWorkflowInputSource], outputSourceConfig: [IdMappingWorkflowOutputSource]? = nil, roleArn: String, tags: [String: String]? = nil, workflowName: String) { + public init(description: String? = nil, idMappingTechniques: IdMappingTechniques, inputSourceConfig: [IdMappingWorkflowInputSource], outputSourceConfig: [IdMappingWorkflowOutputSource]? = nil, roleArn: String? = nil, tags: [String: String]? = nil, workflowName: String) { self.description = description self.idMappingTechniques = idMappingTechniques self.inputSourceConfig = inputSourceConfig @@ -298,8 +317,7 @@ extension EntityResolution { try self.validate(self.outputSourceConfig, name: "outputSourceConfig", parent: name, max: 1) try self.validate(self.outputSourceConfig, name: "outputSourceConfig", parent: name, min: 1) try self.validate(self.roleArn, name: "roleArn", parent: name, max: 512) - try self.validate(self.roleArn, name: "roleArn", parent: name, min: 32) - try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^$|^arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -325,20 +343,20 @@ extension EntityResolution { public struct CreateIdMappingWorkflowOutput: 
AWSDecodableShape { /// A description of the workflow. public let description: String? - /// An object which defines the idMappingType and the providerProperties. + /// An object which defines the ID mapping technique and any additional configurations. public let idMappingTechniques: IdMappingTechniques /// A list of InputSource objects, which have the fields InputSourceARN and SchemaName. public let inputSourceConfig: [IdMappingWorkflowInputSource] /// A list of IdMappingWorkflowOutputSource objects, each of which contains fields OutputS3Path and Output. public let outputSourceConfig: [IdMappingWorkflowOutputSource]? /// The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes this role to create resources on your behalf as part of workflow execution. - public let roleArn: String + public let roleArn: String? /// The ARN (Amazon Resource Name) that Entity Resolution generated for the IDMappingWorkflow. public let workflowArn: String /// The name of the workflow. public let workflowName: String - public init(description: String? = nil, idMappingTechniques: IdMappingTechniques, inputSourceConfig: [IdMappingWorkflowInputSource], outputSourceConfig: [IdMappingWorkflowOutputSource]? = nil, roleArn: String, workflowArn: String, workflowName: String) { + public init(description: String? = nil, idMappingTechniques: IdMappingTechniques, inputSourceConfig: [IdMappingWorkflowInputSource], outputSourceConfig: [IdMappingWorkflowOutputSource]? = nil, roleArn: String? = nil, workflowArn: String, workflowName: String) { self.description = description self.idMappingTechniques = idMappingTechniques self.inputSourceConfig = inputSourceConfig @@ -970,14 +988,14 @@ extension EntityResolution { public let createdAt: Date /// A description of the workflow. public let description: String? - /// An object which defines the idMappingType and the providerProperties. + /// An object which defines the ID mapping technique and any additional configurations. 
public let idMappingTechniques: IdMappingTechniques /// A list of InputSource objects, which have the fields InputSourceARN and SchemaName. public let inputSourceConfig: [IdMappingWorkflowInputSource] /// A list of OutputSource objects, each of which contains fields OutputS3Path and KMSArn. public let outputSourceConfig: [IdMappingWorkflowOutputSource]? /// The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes this role to access Amazon Web Services resources on your behalf. - public let roleArn: String + public let roleArn: String? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? /// The timestamp of when the workflow was last updated. @@ -987,7 +1005,7 @@ extension EntityResolution { /// The name of the workflow. public let workflowName: String - public init(createdAt: Date, description: String? = nil, idMappingTechniques: IdMappingTechniques, inputSourceConfig: [IdMappingWorkflowInputSource], outputSourceConfig: [IdMappingWorkflowOutputSource]? = nil, roleArn: String, tags: [String: String]? = nil, updatedAt: Date, workflowArn: String, workflowName: String) { + public init(createdAt: Date, description: String? = nil, idMappingTechniques: IdMappingTechniques, inputSourceConfig: [IdMappingWorkflowInputSource], outputSourceConfig: [IdMappingWorkflowOutputSource]? = nil, roleArn: String? = nil, tags: [String: String]? = nil, updatedAt: Date, workflowArn: String, workflowName: String) { self.createdAt = createdAt self.description = description self.idMappingTechniques = idMappingTechniques @@ -1474,22 +1492,34 @@ extension EntityResolution { } public struct IdMappingJobMetrics: AWSDecodableShape { - /// The total number of input records. + /// The total number of records that were input for processing. public let inputRecords: Int? /// The total number of records that did not get processed. public let recordsNotProcessed: Int? - /// The total number of records processed. 
+ /// The total number of records that were mapped. + public let totalMappedRecords: Int? + /// The total number of mapped source records. + public let totalMappedSourceRecords: Int? + /// The total number of distinct mapped target records. + public let totalMappedTargetRecords: Int? + /// The total number of records that were processed. public let totalRecordsProcessed: Int? - public init(inputRecords: Int? = nil, recordsNotProcessed: Int? = nil, totalRecordsProcessed: Int? = nil) { + public init(inputRecords: Int? = nil, recordsNotProcessed: Int? = nil, totalMappedRecords: Int? = nil, totalMappedSourceRecords: Int? = nil, totalMappedTargetRecords: Int? = nil, totalRecordsProcessed: Int? = nil) { self.inputRecords = inputRecords self.recordsNotProcessed = recordsNotProcessed + self.totalMappedRecords = totalMappedRecords + self.totalMappedSourceRecords = totalMappedSourceRecords + self.totalMappedTargetRecords = totalMappedTargetRecords self.totalRecordsProcessed = totalRecordsProcessed } private enum CodingKeys: String, CodingKey { case inputRecords = "inputRecords" case recordsNotProcessed = "recordsNotProcessed" + case totalMappedRecords = "totalMappedRecords" + case totalMappedSourceRecords = "totalMappedSourceRecords" + case totalMappedTargetRecords = "totalMappedTargetRecords" case totalRecordsProcessed = "totalRecordsProcessed" } } @@ -1525,33 +1555,69 @@ extension EntityResolution { } } + public struct IdMappingRuleBasedProperties: AWSEncodableShape & AWSDecodableShape { + /// The comparison type. You can either choose ONE_TO_ONE or MANY_TO_MANY as the attributeMatchingModel. If you choose MANY_TO_MANY, the system can match attributes across the sub-types of an attribute type. For example, if the value of the Email field of Profile A matches the value of the BusinessEmail field of Profile B, the two profiles are matched on the Email attribute type. If you choose ONE_TO_ONE, the system can only match attributes if the sub-types are an exact match. 
For example, for the Email attribute type, the system will only consider it a match if the value of the Email field of Profile A matches the value of the Email field of Profile B. + public let attributeMatchingModel: AttributeMatchingModel + /// The type of matching record that is allowed to be used in an ID mapping workflow. If the value is set to ONE_SOURCE_TO_ONE_TARGET, only one record in the source can be matched to the same record in the target. If the value is set to MANY_SOURCE_TO_ONE_TARGET, multiple records in the source can be matched to one record in the target. + public let recordMatchingModel: RecordMatchingModel + /// The set of rules you can use in an ID mapping workflow. The limitations specified for the source or target to define the match rules must be compatible. + public let ruleDefinitionType: IdMappingWorkflowRuleDefinitionType + /// The rules that can be used for ID mapping. + public let rules: [Rule]? + + public init(attributeMatchingModel: AttributeMatchingModel, recordMatchingModel: RecordMatchingModel, ruleDefinitionType: IdMappingWorkflowRuleDefinitionType, rules: [Rule]? = nil) { + self.attributeMatchingModel = attributeMatchingModel + self.recordMatchingModel = recordMatchingModel + self.ruleDefinitionType = ruleDefinitionType + self.rules = rules + } + + public func validate(name: String) throws { + try self.rules?.forEach { + try $0.validate(name: "\(name).rules[]") + } + } + + private enum CodingKeys: String, CodingKey { + case attributeMatchingModel = "attributeMatchingModel" + case recordMatchingModel = "recordMatchingModel" + case ruleDefinitionType = "ruleDefinitionType" + case rules = "rules" + } + } + public struct IdMappingTechniques: AWSEncodableShape & AWSDecodableShape { /// The type of ID mapping. public let idMappingType: IdMappingType /// An object which defines any additional configurations required by the provider service. public let providerProperties: ProviderProperties? 
+ /// An object which defines any additional configurations required by rule-based matching. + public let ruleBasedProperties: IdMappingRuleBasedProperties? - public init(idMappingType: IdMappingType, providerProperties: ProviderProperties? = nil) { + public init(idMappingType: IdMappingType, providerProperties: ProviderProperties? = nil, ruleBasedProperties: IdMappingRuleBasedProperties? = nil) { self.idMappingType = idMappingType self.providerProperties = providerProperties + self.ruleBasedProperties = ruleBasedProperties } public func validate(name: String) throws { try self.providerProperties?.validate(name: "\(name).providerProperties") + try self.ruleBasedProperties?.validate(name: "\(name).ruleBasedProperties") } private enum CodingKeys: String, CodingKey { case idMappingType = "idMappingType" case providerProperties = "providerProperties" + case ruleBasedProperties = "ruleBasedProperties" } } public struct IdMappingWorkflowInputSource: AWSEncodableShape & AWSDecodableShape { - /// An Glue table ARN for the input source table. + /// An Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table. public let inputSourceARN: String /// The name of the schema to be retrieved. public let schemaName: String? - /// The type of ID namespace. There are two types: SOURCE and TARGET. The SOURCE contains configurations for sourceId data that will be processed in an ID mapping workflow. The TARGET contains a configuration of targetId to which all sourceIds will resolve to. + /// The type of ID namespace. There are two types: SOURCE and TARGET. The SOURCE contains configurations for sourceId data that will be processed in an ID mapping workflow. The TARGET contains a configuration of targetId which all sourceIds will resolve to. public let type: IdNamespaceType? public init(inputSourceARN: String, schemaName: String? = nil, type: IdNamespaceType? 
= nil) { @@ -1622,29 +1688,47 @@ extension EntityResolution { } } + public struct IdNamespaceIdMappingWorkflowMetadata: AWSDecodableShape { + /// The type of ID mapping. + public let idMappingType: IdMappingType + + public init(idMappingType: IdMappingType) { + self.idMappingType = idMappingType + } + + private enum CodingKeys: String, CodingKey { + case idMappingType = "idMappingType" + } + } + public struct IdNamespaceIdMappingWorkflowProperties: AWSEncodableShape & AWSDecodableShape { /// The type of ID mapping. public let idMappingType: IdMappingType /// An object which defines any additional configurations required by the provider service. public let providerProperties: NamespaceProviderProperties? + /// An object which defines any additional configurations required by rule-based matching. + public let ruleBasedProperties: NamespaceRuleBasedProperties? - public init(idMappingType: IdMappingType, providerProperties: NamespaceProviderProperties? = nil) { + public init(idMappingType: IdMappingType, providerProperties: NamespaceProviderProperties? = nil, ruleBasedProperties: NamespaceRuleBasedProperties? = nil) { self.idMappingType = idMappingType self.providerProperties = providerProperties + self.ruleBasedProperties = ruleBasedProperties } public func validate(name: String) throws { try self.providerProperties?.validate(name: "\(name).providerProperties") + try self.ruleBasedProperties?.validate(name: "\(name).ruleBasedProperties") } private enum CodingKeys: String, CodingKey { case idMappingType = "idMappingType" case providerProperties = "providerProperties" + case ruleBasedProperties = "ruleBasedProperties" } } public struct IdNamespaceInputSource: AWSEncodableShape & AWSDecodableShape { - /// An Glue table ARN for the input source table. + /// An Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table. public let inputSourceARN: String /// The name of the schema. public let schemaName: String? 
@@ -1671,18 +1755,21 @@ extension EntityResolution { public let createdAt: Date /// The description of the ID namespace. public let description: String? + /// An object which defines any additional configurations required by the ID mapping workflow. + public let idMappingWorkflowProperties: [IdNamespaceIdMappingWorkflowMetadata]? /// The Amazon Resource Name (ARN) of the ID namespace. public let idNamespaceArn: String /// The name of the ID namespace. public let idNamespaceName: String - /// The type of ID namespace. There are two types: SOURCE and TARGET. The SOURCE contains configurations for sourceId data that will be processed in an ID mapping workflow. The TARGET contains a configuration of targetId to which all sourceIds will resolve to. + /// The type of ID namespace. There are two types: SOURCE and TARGET. The SOURCE contains configurations for sourceId data that will be processed in an ID mapping workflow. The TARGET contains a configuration of targetId which all sourceIds will resolve to. public let type: IdNamespaceType /// The timestamp of when the ID namespace was last updated. public let updatedAt: Date - public init(createdAt: Date, description: String? = nil, idNamespaceArn: String, idNamespaceName: String, type: IdNamespaceType, updatedAt: Date) { + public init(createdAt: Date, description: String? = nil, idMappingWorkflowProperties: [IdNamespaceIdMappingWorkflowMetadata]? 
= nil, idNamespaceArn: String, idNamespaceName: String, type: IdNamespaceType, updatedAt: Date) { self.createdAt = createdAt self.description = description + self.idMappingWorkflowProperties = idMappingWorkflowProperties self.idNamespaceArn = idNamespaceArn self.idNamespaceName = idNamespaceName self.type = type @@ -1692,6 +1779,7 @@ extension EntityResolution { private enum CodingKeys: String, CodingKey { case createdAt = "createdAt" case description = "description" + case idMappingWorkflowProperties = "idMappingWorkflowProperties" case idNamespaceArn = "idNamespaceArn" case idNamespaceName = "idNamespaceName" case type = "type" @@ -1715,7 +1803,7 @@ extension EntityResolution { public struct InputSource: AWSEncodableShape & AWSDecodableShape { /// Normalizes the attributes defined in the schema in the input data. For example, if an attribute has an AttributeType of PHONE_NUMBER, and the data in the input table is in a format of 1234567890, Entity Resolution will normalize this field in the output to (123)-456-7890. public let applyNormalization: Bool? - /// An Glue table ARN for the input source table. + /// An Glue table Amazon Resource Name (ARN) for the input source table. public let inputSourceARN: String /// The name of the schema to be retrieved. public let schemaName: String @@ -2242,6 +2330,37 @@ extension EntityResolution { } } + public struct NamespaceRuleBasedProperties: AWSEncodableShape & AWSDecodableShape { + /// The comparison type. You can either choose ONE_TO_ONE or MANY_TO_MANY as the attributeMatchingModel. If you choose MANY_TO_MANY, the system can match attributes across the sub-types of an attribute type. For example, if the value of the Email field of Profile A matches the value of BusinessEmail field of Profile B, the two profiles are matched on the Email attribute type. If you choose ONE_TO_ONE, the system can only match attributes if the sub-types are an exact match. 
For example, for the Email attribute type, the system will only consider it a match if the value of the Email field of Profile A matches the value of the Email field of Profile B. + public let attributeMatchingModel: AttributeMatchingModel? + /// The type of matching record that is allowed to be used in an ID mapping workflow. If the value is set to ONE_SOURCE_TO_ONE_TARGET, only one record in the source is matched to one record in the target. If the value is set to MANY_SOURCE_TO_ONE_TARGET, all matching records in the source are matched to one record in the target. + public let recordMatchingModels: [RecordMatchingModel]? + /// The sets of rules you can use in an ID mapping workflow. The limitations specified for the source and target must be compatible. + public let ruleDefinitionTypes: [IdMappingWorkflowRuleDefinitionType]? + /// The rules for the ID namespace. + public let rules: [Rule]? + + public init(attributeMatchingModel: AttributeMatchingModel? = nil, recordMatchingModels: [RecordMatchingModel]? = nil, ruleDefinitionTypes: [IdMappingWorkflowRuleDefinitionType]? = nil, rules: [Rule]? = nil) { + self.attributeMatchingModel = attributeMatchingModel + self.recordMatchingModels = recordMatchingModels + self.ruleDefinitionTypes = ruleDefinitionTypes + self.rules = rules + } + + public func validate(name: String) throws { + try self.rules?.forEach { + try $0.validate(name: "\(name).rules[]") + } + } + + private enum CodingKeys: String, CodingKey { + case attributeMatchingModel = "attributeMatchingModel" + case recordMatchingModels = "recordMatchingModels" + case ruleDefinitionTypes = "ruleDefinitionTypes" + case rules = "rules" + } + } + public struct OutputAttribute: AWSEncodableShape & AWSDecodableShape { /// Enables the ability to hash the column values in the output. public let hashed: Bool? @@ -2321,7 +2440,7 @@ extension EntityResolution { public let description: String? /// Configurations required for the source ID namespace. 
public let providerSourceConfigurationDefinition: String? - /// Configurations required for the target ID namespace. + /// Configurations required for the target ID namespace. public let providerTargetConfigurationDefinition: String? public init(description: String? = nil, providerSourceConfigurationDefinition: String? = nil, providerTargetConfigurationDefinition: String? = nil) { @@ -2464,7 +2583,7 @@ extension EntityResolution { public struct PutPolicyInput: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the resource for which the policy needs to be updated. public let arn: String - /// The resource-based policy. + /// The resource-based policy. If you set the value of the effect parameter in the policy to Deny for the PutPolicy operation, you must also set the value of the effect parameter to Deny for the AddPolicyStatement operation. public let policy: String /// A unique identifier for the current revision of the policy. public let token: String? @@ -2570,13 +2689,16 @@ extension EntityResolution { } public struct RuleBasedProperties: AWSEncodableShape & AWSDecodableShape { - /// The comparison type. You can either choose ONE_TO_ONE or MANY_TO_MANY as the AttributeMatchingModel. When choosing MANY_TO_MANY, the system can match attributes across the sub-types of an attribute type. For example, if the value of the Email field of Profile A and the value of BusinessEmail field of Profile B matches, the two profiles are matched on the Email type. When choosing ONE_TO_ONE ,the system can only match if the sub-types are exact matches. For example, only when the value of the Email field of Profile A and the value of the Email field of Profile B matches, the two profiles are matched on the Email type. + /// The comparison type. You can either choose ONE_TO_ONE or MANY_TO_MANY as the attributeMatchingModel. If you choose MANY_TO_MANY, the system can match attributes across the sub-types of an attribute type. 
For example, if the value of the Email field of Profile A and the value of BusinessEmail field of Profile B matches, the two profiles are matched on the Email attribute type. If you choose ONE_TO_ONE, the system can only match attributes if the sub-types are an exact match. For example, for the Email attribute type, the system will only consider it a match if the value of the Email field of Profile A matches the value of the Email field of Profile B. public let attributeMatchingModel: AttributeMatchingModel + /// An indicator of whether to generate IDs and index the data or not. If you choose IDENTIFIER_GENERATION, the process generates IDs and indexes the data. If you choose INDEXING, the process indexes the data without generating IDs. + public let matchPurpose: MatchPurpose? /// A list of Rule objects, each of which have fields RuleName and MatchingKeys. public let rules: [Rule] - public init(attributeMatchingModel: AttributeMatchingModel, rules: [Rule]) { + public init(attributeMatchingModel: AttributeMatchingModel, matchPurpose: MatchPurpose? = nil, rules: [Rule]) { self.attributeMatchingModel = attributeMatchingModel + self.matchPurpose = matchPurpose self.rules = rules } @@ -2588,6 +2710,7 @@ extension EntityResolution { private enum CodingKeys: String, CodingKey { case attributeMatchingModel = "attributeMatchingModel" + case matchPurpose = "matchPurpose" case rules = "rules" } } @@ -2597,16 +2720,19 @@ extension EntityResolution { public let fieldName: String /// A string that instructs Entity Resolution to combine several columns into a unified column with the identical attribute type. For example, when working with columns such as first_name, middle_name, and last_name, assigning them a common groupName will prompt Entity Resolution to concatenate them into a single value. public let groupName: String? - /// A key that allows grouping of multiple input attributes into a unified matching group. 
For example, consider a scenario where the source table contains various addresses, such as business_address and shipping_address. By assigning a matchKey called address to both attributes, Entity Resolution will match records across these fields to create a consolidated matching group. If no matchKey is specified for a column, it won't be utilized for matching purposes but will still be included in the output table. + /// Indicates if the column values are hashed in the schema input. If the value is set to TRUE, the column values are hashed. If the value is set to FALSE, the column values are cleartext. + public let hashed: Bool? + /// A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as business_address and shipping_address. By assigning a matchKey called address to both attributes, Entity Resolution will match records across these fields to create a consolidated matching group. If no matchKey is specified for a column, it won't be utilized for matching purposes but will still be included in the output table. public let matchKey: String? /// The subtype of the attribute, selected from a list of values. public let subType: String? /// The type of the attribute, selected from a list of values. public let type: SchemaAttributeType - public init(fieldName: String, groupName: String? = nil, matchKey: String? = nil, subType: String? = nil, type: SchemaAttributeType) { + public init(fieldName: String, groupName: String? = nil, hashed: Bool? = nil, matchKey: String? = nil, subType: String? 
= nil, type: SchemaAttributeType) { self.fieldName = fieldName self.groupName = groupName + self.hashed = hashed self.matchKey = matchKey self.subType = subType self.type = type @@ -2626,6 +2752,7 @@ extension EntityResolution { private enum CodingKeys: String, CodingKey { case fieldName = "fieldName" case groupName = "groupName" + case hashed = "hashed" case matchKey = "matchKey" case subType = "subType" case type = "type" @@ -2820,18 +2947,18 @@ extension EntityResolution { public struct UpdateIdMappingWorkflowInput: AWSEncodableShape { /// A description of the workflow. public let description: String? - /// An object which defines the idMappingType and the providerProperties. + /// An object which defines the ID mapping technique and any additional configurations. public let idMappingTechniques: IdMappingTechniques /// A list of InputSource objects, which have the fields InputSourceARN and SchemaName. public let inputSourceConfig: [IdMappingWorkflowInputSource] /// A list of OutputSource objects, each of which contains fields OutputS3Path and KMSArn. public let outputSourceConfig: [IdMappingWorkflowOutputSource]? /// The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes this role to access Amazon Web Services resources on your behalf. - public let roleArn: String + public let roleArn: String? /// The name of the workflow. public let workflowName: String - public init(description: String? = nil, idMappingTechniques: IdMappingTechniques, inputSourceConfig: [IdMappingWorkflowInputSource], outputSourceConfig: [IdMappingWorkflowOutputSource]? = nil, roleArn: String, workflowName: String) { + public init(description: String? = nil, idMappingTechniques: IdMappingTechniques, inputSourceConfig: [IdMappingWorkflowInputSource], outputSourceConfig: [IdMappingWorkflowOutputSource]? = nil, roleArn: String? 
= nil, workflowName: String) { self.description = description self.idMappingTechniques = idMappingTechniques self.inputSourceConfig = inputSourceConfig @@ -2847,7 +2974,7 @@ extension EntityResolution { try container.encode(self.idMappingTechniques, forKey: .idMappingTechniques) try container.encode(self.inputSourceConfig, forKey: .inputSourceConfig) try container.encodeIfPresent(self.outputSourceConfig, forKey: .outputSourceConfig) - try container.encode(self.roleArn, forKey: .roleArn) + try container.encodeIfPresent(self.roleArn, forKey: .roleArn) request.encodePath(self.workflowName, key: "workflowName") } @@ -2865,8 +2992,7 @@ extension EntityResolution { try self.validate(self.outputSourceConfig, name: "outputSourceConfig", parent: name, max: 1) try self.validate(self.outputSourceConfig, name: "outputSourceConfig", parent: name, min: 1) try self.validate(self.roleArn, name: "roleArn", parent: name, max: 512) - try self.validate(self.roleArn, name: "roleArn", parent: name, min: 32) - try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^$|^arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") try self.validate(self.workflowName, name: "workflowName", parent: name, max: 255) try self.validate(self.workflowName, name: "workflowName", parent: name, min: 1) try self.validate(self.workflowName, name: "workflowName", parent: name, pattern: "^[a-zA-Z_0-9-]*$") @@ -2884,20 +3010,20 @@ extension EntityResolution { public struct UpdateIdMappingWorkflowOutput: AWSDecodableShape { /// A description of the workflow. public let description: String? - /// An object which defines the idMappingType and the providerProperties. + /// An object which defines the ID mapping technique and any additional configurations. 
public let idMappingTechniques: IdMappingTechniques /// A list of InputSource objects, which have the fields InputSourceARN and SchemaName. public let inputSourceConfig: [IdMappingWorkflowInputSource] /// A list of OutputSource objects, each of which contains fields OutputS3Path and KMSArn. public let outputSourceConfig: [IdMappingWorkflowOutputSource]? /// The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes this role to access Amazon Web Services resources on your behalf. - public let roleArn: String + public let roleArn: String? /// The Amazon Resource Name (ARN) of the workflow role. Entity Resolution assumes this role to access Amazon Web Services resources on your behalf. public let workflowArn: String /// The name of the workflow. public let workflowName: String - public init(description: String? = nil, idMappingTechniques: IdMappingTechniques, inputSourceConfig: [IdMappingWorkflowInputSource], outputSourceConfig: [IdMappingWorkflowOutputSource]? = nil, roleArn: String, workflowArn: String, workflowName: String) { + public init(description: String? = nil, idMappingTechniques: IdMappingTechniques, inputSourceConfig: [IdMappingWorkflowInputSource], outputSourceConfig: [IdMappingWorkflowOutputSource]? = nil, roleArn: String? = nil, workflowArn: String, workflowName: String) { self.description = description self.idMappingTechniques = idMappingTechniques self.inputSourceConfig = inputSourceConfig @@ -3234,19 +3360,19 @@ public struct EntityResolutionErrorType: AWSErrorType { /// return error code string public var errorCode: String { self.error.rawValue } - /// You do not have sufficient access to perform this action. HTTP Status Code: 403 + /// You do not have sufficient access to perform this action. public static var accessDeniedException: Self { .init(.accessDeniedException) } - /// The request could not be processed because of conflict in the current state of the resource. 
Example: Workflow already exists, Schema already exists, Workflow is currently running, etc. HTTP Status Code: 400 + /// The request could not be processed because of conflict in the current state of the resource. Example: Workflow already exists, Schema already exists, Workflow is currently running, etc. public static var conflictException: Self { .init(.conflictException) } - /// The request was rejected because it attempted to create resources beyond the current Entity Resolution account limits. The error message describes the limit exceeded. HTTP Status Code: 402 + /// The request was rejected because it attempted to create resources beyond the current Entity Resolution account limits. The error message describes the limit exceeded. public static var exceedsLimitException: Self { .init(.exceedsLimitException) } - /// This exception occurs when there is an internal failure in the Entity Resolution service. HTTP Status Code: 500 + /// This exception occurs when there is an internal failure in the Entity Resolution service. public static var internalServerException: Self { .init(.internalServerException) } - /// The resource could not be found. HTTP Status Code: 404 + /// The resource could not be found. public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } - /// The request was denied due to request throttling. HTTP Status Code: 429 + /// The request was denied due to request throttling. public static var throttlingException: Self { .init(.throttlingException) } - /// The input fails to satisfy the constraints specified by Entity Resolution. HTTP Status Code: 400 + /// The input fails to satisfy the constraints specified by Entity Resolution. 
public static var validationException: Self { .init(.validationException) } } diff --git a/Sources/Soto/Services/FIS/FIS_api.swift b/Sources/Soto/Services/FIS/FIS_api.swift index c1397608ca..612520c8fe 100644 --- a/Sources/Soto/Services/FIS/FIS_api.swift +++ b/Sources/Soto/Services/FIS/FIS_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS FIS service. /// -/// Fault Injection Service is a managed service that enables you to perform fault injection experiments on your Amazon Web Services workloads. For more information, see the Fault Injection Service User Guide. +/// Amazon Web Services Fault Injection Service is a managed service that enables you to perform fault injection experiments on your Amazon Web Services workloads. For more information, see the Fault Injection Service User Guide. public struct FIS: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/FIS/FIS_shapes.swift b/Sources/Soto/Services/FIS/FIS_shapes.swift index f050c42459..da2f9de75f 100644 --- a/Sources/Soto/Services/FIS/FIS_shapes.swift +++ b/Sources/Soto/Services/FIS/FIS_shapes.swift @@ -718,6 +718,27 @@ extension FIS { } } + public struct ExperimentError: AWSDecodableShape { + /// The Amazon Web Services Account ID where the experiment failure occurred. + public let accountId: String? + /// The error code for the failed experiment. + public let code: String? + /// Context for the section of the experiment template that failed. + public let location: String? + + public init(accountId: String? = nil, code: String? = nil, location: String? = nil) { + self.accountId = accountId + self.code = code + self.location = location + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + case code = "code" + case location = "location" + } + } + public struct ExperimentLogConfiguration: AWSDecodableShape { /// The configuration for experiment logging to Amazon CloudWatch Logs. 
public let cloudWatchLogsConfiguration: ExperimentCloudWatchLogsLogConfiguration? @@ -778,17 +799,21 @@ extension FIS { } public struct ExperimentState: AWSDecodableShape { + /// The error information of the experiment when the action has failed. + public let error: ExperimentError? /// The reason for the state. public let reason: String? /// The state of the experiment. public let status: ExperimentStatus? - public init(reason: String? = nil, status: ExperimentStatus? = nil) { + public init(error: ExperimentError? = nil, reason: String? = nil, status: ExperimentStatus? = nil) { + self.error = error self.reason = reason self.status = status } private enum CodingKeys: String, CodingKey { + case error = "error" case reason = "reason" case status = "status" } diff --git a/Sources/Soto/Services/Glue/Glue_api.swift b/Sources/Soto/Services/Glue/Glue_api.swift index aa48a8b1a0..7d40a909bf 100644 --- a/Sources/Soto/Services/Glue/Glue_api.swift +++ b/Sources/Soto/Services/Glue/Glue_api.swift @@ -289,6 +289,19 @@ public struct Glue: AWSService { ) } + /// Annotate datapoints over time for a specific data quality statistic. + @Sendable + public func batchPutDataQualityStatisticAnnotation(_ input: BatchPutDataQualityStatisticAnnotationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchPutDataQualityStatisticAnnotationResponse { + return try await self.client.execute( + operation: "BatchPutDataQualityStatisticAnnotation", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Stops one or more job runs for a specified job definition. @Sendable public func batchStopJobRun(_ input: BatchStopJobRunRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchStopJobRunResponse { @@ -1251,6 +1264,32 @@ public struct Glue: AWSService { ) } + /// Retrieve the training status of the model along with more information (CompletedOn, StartedOn, FailureReason). 
+ @Sendable + public func getDataQualityModel(_ input: GetDataQualityModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataQualityModelResponse { + return try await self.client.execute( + operation: "GetDataQualityModel", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieve a statistic's predictions for a given Profile ID. + @Sendable + public func getDataQualityModelResult(_ input: GetDataQualityModelResultRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataQualityModelResultResponse { + return try await self.client.execute( + operation: "GetDataQualityModelResult", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves the result of a data quality rule evaluation. @Sendable public func getDataQualityResult(_ input: GetDataQualityResultRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataQualityResultResponse { @@ -2057,6 +2096,32 @@ public struct Glue: AWSService { ) } + /// Retrieve annotations for a data quality statistic. + @Sendable + public func listDataQualityStatisticAnnotations(_ input: ListDataQualityStatisticAnnotationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDataQualityStatisticAnnotationsResponse { + return try await self.client.execute( + operation: "ListDataQualityStatisticAnnotations", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves a list of data quality statistics. 
+ @Sendable + public func listDataQualityStatistics(_ input: ListDataQualityStatisticsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDataQualityStatisticsResponse { + return try await self.client.execute( + operation: "ListDataQualityStatistics", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves the names of all DevEndpoint resources in this Amazon Web Services account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names. This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved. @Sendable public func listDevEndpoints(_ input: ListDevEndpointsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDevEndpointsResponse { @@ -2226,6 +2291,19 @@ public struct Glue: AWSService { ) } + /// Annotate all datapoints for a Profile. + @Sendable + public func putDataQualityProfileAnnotation(_ input: PutDataQualityProfileAnnotationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutDataQualityProfileAnnotationResponse { + return try await self.client.execute( + operation: "PutDataQualityProfileAnnotation", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Sets the Data Catalog resource policy for access control. 
@Sendable public func putResourcePolicy(_ input: PutResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutResourcePolicyResponse { @@ -3926,9 +4004,11 @@ extension Glue.GetTableVersionsRequest: AWSPaginateToken { extension Glue.GetTablesRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Glue.GetTablesRequest { return .init( + attributesToGet: self.attributesToGet, catalogId: self.catalogId, databaseName: self.databaseName, expression: self.expression, + includeStatusDetails: self.includeStatusDetails, maxResults: self.maxResults, nextToken: token, queryAsOfTime: self.queryAsOfTime, @@ -4187,6 +4267,7 @@ extension Glue.SearchTablesRequest: AWSPaginateToken { return .init( catalogId: self.catalogId, filters: self.filters, + includeStatusDetails: self.includeStatusDetails, maxResults: self.maxResults, nextToken: token, resourceShareType: self.resourceShareType, diff --git a/Sources/Soto/Services/Glue/Glue_shapes.swift b/Sources/Soto/Services/Glue/Glue_shapes.swift index 20e4f77d1d..ea0dd40c6f 100644 --- a/Sources/Soto/Services/Glue/Glue_shapes.swift +++ b/Sources/Soto/Services/Glue/Glue_shapes.swift @@ -144,6 +144,7 @@ extension Glue { } public enum ConnectionPropertyKey: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case clusterIdentifier = "CLUSTER_IDENTIFIER" case configFiles = "CONFIG_FILES" case connectionUrl = "CONNECTION_URL" case connectorClassName = "CONNECTOR_CLASS_NAME" @@ -151,6 +152,7 @@ extension Glue { case connectorUrl = "CONNECTOR_URL" case customJdbcCert = "CUSTOM_JDBC_CERT" case customJdbcCertString = "CUSTOM_JDBC_CERT_STRING" + case database = "DATABASE" case encryptedKafkaClientKeyPassword = "ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD" case encryptedKafkaClientKeystorePassword = "ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD" case encryptedKafkaSaslPlainPassword = "ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD" @@ -183,10 +185,12 @@ extension Glue { case kafkaSslEnabled = 
"KAFKA_SSL_ENABLED" case password = "PASSWORD" case port = "PORT" + case region = "REGION" case roleArn = "ROLE_ARN" case secretId = "SECRET_ID" case skipCustomJdbcCertValidation = "SKIP_CUSTOM_JDBC_CERT_VALIDATION" case userName = "USERNAME" + case workgroupName = "WORKGROUP_NAME" public var description: String { return self.rawValue } } @@ -206,6 +210,8 @@ extension Glue { case network = "NETWORK" case salesforce = "SALESFORCE" case sftp = "SFTP" + case viewValidationAthena = "VIEW_VALIDATION_ATHENA" + case viewValidationRedshift = "VIEW_VALIDATION_REDSHIFT" public var description: String { return self.rawValue } } @@ -279,6 +285,13 @@ extension Glue { public var description: String { return self.rawValue } } + public enum DataQualityModelStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failed = "FAILED" + case running = "RUNNING" + case succeeded = "SUCCEEDED" + public var description: String { return self.rawValue } + } + public enum DataQualityRuleResultStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case error = "ERROR" case fail = "FAIL" @@ -387,6 +400,12 @@ extension Glue { public var description: String { return self.rawValue } } + public enum InclusionAnnotationValue: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case exclude = "EXCLUDE" + case include = "INCLUDE" + public var description: String { return self.rawValue } + } + public enum JDBCConnectionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case mysql = "mysql" case oracle = "oracle" @@ -616,6 +635,12 @@ extension Glue { public var description: String { return self.rawValue } } + public enum ResourceAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case create = "CREATE" + case update = "UPDATE" + public var description: String { return self.rawValue } + } + public enum ResourceShareType: String, 
CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case all = "ALL" case federated = "FEDERATED" @@ -623,6 +648,15 @@ extension Glue { public var description: String { return self.rawValue } } + public enum ResourceState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failed = "FAILED" + case inProgress = "IN_PROGRESS" + case queued = "QUEUED" + case stopped = "STOPPED" + case success = "SUCCESS" + public var description: String { return self.rawValue } + } + public enum ResourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case archive = "ARCHIVE" case file = "FILE" @@ -727,6 +761,19 @@ extension Glue { public var description: String { return self.rawValue } } + public enum StatisticEvaluationLevel: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case column = "Column" + case dataset = "Dataset" + case multicolumn = "Multicolumn" + public var description: String { return self.rawValue } + } + + public enum TableAttributes: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case name = "NAME" + case tableType = "TABLE_TYPE" + public var description: String { return self.rawValue } + } + public enum TableOptimizerEventType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case completed = "completed" case failed = "failed" @@ -1188,6 +1235,27 @@ extension Glue { } } + public struct AnnotationError: AWSDecodableShape { + /// The reason why the annotation failed. + public let failureReason: String? + /// The Profile ID for the failed annotation. + public let profileId: String? + /// The Statistic ID for the failed annotation. + public let statisticId: String? + + public init(failureReason: String? = nil, profileId: String? = nil, statisticId: String? 
= nil) { + self.failureReason = failureReason + self.profileId = profileId + self.statisticId = statisticId + } + + private enum CodingKeys: String, CodingKey { + case failureReason = "FailureReason" + case profileId = "ProfileId" + case statisticId = "StatisticId" + } + } + public struct ApplyMapping: AWSEncodableShape & AWSDecodableShape { /// The data inputs identified by their node names. public let inputs: [String] @@ -1396,13 +1464,16 @@ extension Glue { public let inputs: [String] /// The name of your data target. public let name: String + /// The partition keys used to distribute data across multiple partitions or shards based on a specific key or set of key. + public let partitionKeys: [[String]]? /// The table that defines the schema of your output data. This table must already exist in the Data Catalog. public let table: String - public init(database: String, inputs: [String], name: String, table: String) { + public init(database: String, inputs: [String], name: String, partitionKeys: [[String]]? = nil, table: String) { self.database = database self.inputs = inputs self.name = name + self.partitionKeys = partitionKeys self.table = table } @@ -1421,6 +1492,7 @@ extension Glue { case database = "Database" case inputs = "Inputs" case name = "Name" + case partitionKeys = "PartitionKeys" case table = "Table" } } @@ -2169,6 +2241,45 @@ extension Glue { } } + public struct BatchPutDataQualityStatisticAnnotationRequest: AWSEncodableShape { + /// Client Token. + public let clientToken: String? + /// A list of DatapointInclusionAnnotation's. + public let inclusionAnnotations: [DatapointInclusionAnnotation] + + public init(clientToken: String? 
= nil, inclusionAnnotations: [DatapointInclusionAnnotation]) { + self.clientToken = clientToken + self.inclusionAnnotations = inclusionAnnotations + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 255) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try self.inclusionAnnotations.forEach { + try $0.validate(name: "\(name).inclusionAnnotations[]") + } + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case inclusionAnnotations = "InclusionAnnotations" + } + } + + public struct BatchPutDataQualityStatisticAnnotationResponse: AWSDecodableShape { + /// A list of AnnotationError's. + public let failedInclusionAnnotations: [AnnotationError]? + + public init(failedInclusionAnnotations: [AnnotationError]? = nil) { + self.failedInclusionAnnotations = failedInclusionAnnotations + } + + private enum CodingKeys: String, CodingKey { + case failedInclusionAnnotations = "FailedInclusionAnnotations" + } + } + public struct BatchStopJobRunError: AWSDecodableShape { /// Specifies details about the error that was encountered. public let errorDetail: ErrorDetail? @@ -3928,7 +4039,7 @@ extension Glue { public struct Connection: AWSDecodableShape { /// The authentication properties of the connection. public let authenticationConfiguration: AuthenticationConfiguration? - /// These key-value pairs define parameters for the connection: HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host. PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections. USER_NAME - The name under which to log in to the database. The value string for USER_NAME is "USERNAME". 
PASSWORD - A password, if one is used, for the user name. ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password. JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use. JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use. JDBC_ENGINE - The name of the JDBC engine to use. JDBC_ENGINE_VERSION - The version of the JDBC engine to use. CONFIG_FILES - (Reserved for future use.) INSTANCE_ID - The instance ID to use. JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source. JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false. CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format. SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate. CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate. 
CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source. SECRET_ID - The secret ID used for the secret manager of credentials. CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection. CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection. CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection. KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself. KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is "true". KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string. KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is "false". KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional). KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional). KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional). ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected). ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_MECHANISM - "SCRAM-SHA-512", "GSSAPI", "AWS_MSK_IAM", or "PLAIN". These are the supported SASL Mechanisms. KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with the "PLAIN" mechanism. 
KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with the "PLAIN" mechanism. ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the "SCRAM-SHA-512" mechanism. KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the "SCRAM-SHA-512" mechanism. ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager. KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab. KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf. KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration. KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos princial used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers. + /// These key-value pairs define parameters for the connection: HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host. PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections. USER_NAME - The name under which to log in to the database. The value string for USER_NAME is "USERNAME". PASSWORD - A password, if one is used, for the user name. 
ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password. JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use. JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use. JDBC_ENGINE - The name of the JDBC engine to use. JDBC_ENGINE_VERSION - The version of the JDBC engine to use. CONFIG_FILES - (Reserved for future use.) INSTANCE_ID - The instance ID to use. JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source. JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false. CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format. SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate. CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate. CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source. 
SECRET_ID - The secret ID used for the secret manager of credentials. CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection. CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection. CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection. KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself. KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is "true". KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string. KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is "false". KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional). KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional). KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional). ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected). ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_MECHANISM - "SCRAM-SHA-512", "GSSAPI", "AWS_MSK_IAM", or "PLAIN". These are the supported SASL Mechanisms. KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with the "PLAIN" mechanism. KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with the "PLAIN" mechanism. 
ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the "SCRAM-SHA-512" mechanism. KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the "SCRAM-SHA-512" mechanism. ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager. KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab. KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf. KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration. KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos princial used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers. ROLE_ARN - The role to be used for running queries. REGION - The Amazon Web Services Region where queries will be run. WORKGROUP_NAME - The name of an Amazon Redshift serverless workgroup or Amazon Athena workgroup in which queries will run. CLUSTER_IDENTIFIER - The cluster identifier of an Amazon Redshift cluster in which queries will run. DATABASE - The Amazon Redshift database that you are connecting to. public let connectionProperties: [ConnectionPropertyKey: String]? /// The type of the connection. Currently, SFTP is not supported. public let connectionType: ConnectionType? 
@@ -3991,7 +4102,7 @@ extension Glue { public let authenticationConfiguration: AuthenticationConfigurationInput? /// These key-value pairs define parameters for the connection. public let connectionProperties: [ConnectionPropertyKey: String] - /// The type of the connection. Currently, these types are supported: JDBC - Designates a connection to a database through Java Database Connectivity (JDBC). JDBC Connections use the following ConnectionParameters. Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID. Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC. KAFKA - Designates a connection to an Apache Kafka streaming platform. KAFKA Connections use the following ConnectionParameters. Required: KAFKA_BOOTSTRAP_SERVERS. Optional: KAFKA_SSL_ENABLED, KAFKA_CUSTOM_CERT, KAFKA_SKIP_CUSTOM_CERT_VALIDATION. These parameters are used to configure SSL with KAFKA. Optional: KAFKA_CLIENT_KEYSTORE, KAFKA_CLIENT_KEYSTORE_PASSWORD, KAFKA_CLIENT_KEY_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD. These parameters are used to configure TLS client configuration with SSL in KAFKA. Optional: KAFKA_SASL_MECHANISM. Can be specified as SCRAM-SHA-512, GSSAPI, or AWS_MSK_IAM. Optional: KAFKA_SASL_SCRAM_USERNAME, KAFKA_SASL_SCRAM_PASSWORD, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA. Optional: KAFKA_SASL_GSSAPI_KEYTAB, KAFKA_SASL_GSSAPI_KRB5_CONF, KAFKA_SASL_GSSAPI_SERVICE, KAFKA_SASL_GSSAPI_PRINCIPAL. These parameters are used to configure SASL/GSSAPI authentication with KAFKA. MONGODB - Designates a connection to a MongoDB document database. MONGODB Connections use the following ConnectionParameters. Required: CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID. 
SALESFORCE - Designates a connection to Salesforce using OAuth authencation. Requires the AuthenticationConfiguration member to be configured. NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC). NETWORK Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements. MARKETPLACE - Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue. MARKETPLACE Connections use the following ConnectionParameters. Required: CONNECTOR_TYPE, CONNECTOR_URL, CONNECTOR_CLASS_NAME, CONNECTION_URL. Required for JDBC CONNECTOR_TYPE connections: All of (USERNAME, PASSWORD) or SECRET_ID. CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue. SFTP is not supported. For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties. For more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections. + /// The type of the connection. Currently, these types are supported: JDBC - Designates a connection to a database through Java Database Connectivity (JDBC). JDBC Connections use the following ConnectionParameters. Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID. Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC. KAFKA - Designates a connection to an Apache Kafka streaming platform. KAFKA Connections use the following ConnectionParameters. Required: KAFKA_BOOTSTRAP_SERVERS. Optional: KAFKA_SSL_ENABLED, KAFKA_CUSTOM_CERT, KAFKA_SKIP_CUSTOM_CERT_VALIDATION. 
These parameters are used to configure SSL with KAFKA. Optional: KAFKA_CLIENT_KEYSTORE, KAFKA_CLIENT_KEYSTORE_PASSWORD, KAFKA_CLIENT_KEY_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD. These parameters are used to configure TLS client configuration with SSL in KAFKA. Optional: KAFKA_SASL_MECHANISM. Can be specified as SCRAM-SHA-512, GSSAPI, or AWS_MSK_IAM. Optional: KAFKA_SASL_SCRAM_USERNAME, KAFKA_SASL_SCRAM_PASSWORD, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA. Optional: KAFKA_SASL_GSSAPI_KEYTAB, KAFKA_SASL_GSSAPI_KRB5_CONF, KAFKA_SASL_GSSAPI_SERVICE, KAFKA_SASL_GSSAPI_PRINCIPAL. These parameters are used to configure SASL/GSSAPI authentication with KAFKA. MONGODB - Designates a connection to a MongoDB document database. MONGODB Connections use the following ConnectionParameters. Required: CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID. SALESFORCE - Designates a connection to Salesforce using OAuth authencation. Requires the AuthenticationConfiguration member to be configured. VIEW_VALIDATION_REDSHIFT - Designates a connection used for view validation by Amazon Redshift. VIEW_VALIDATION_ATHENA - Designates a connection used for view validation by Amazon Athena. NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC). NETWORK Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements. MARKETPLACE - Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue. MARKETPLACE Connections use the following ConnectionParameters. Required: CONNECTOR_TYPE, CONNECTOR_URL, CONNECTOR_CLASS_NAME, CONNECTION_URL. Required for JDBC CONNECTOR_TYPE connections: All of (USERNAME, PASSWORD) or SECRET_ID. 
CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue. SFTP is not supported. For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties. For more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections. public let connectionType: ConnectionType /// The description of the connection. public let description: String? @@ -4814,6 +4925,8 @@ extension Glue { public struct CreateDataQualityRulesetRequest: AWSEncodableShape { /// Used for idempotency and is recommended to be set to a random ID (such as a UUID) to avoid creating or starting multiple instances of the same resource. public let clientToken: String? + /// The name of the security configuration created with the data quality encryption option. + public let dataQualitySecurityConfiguration: String? /// A description of the data quality ruleset. public let description: String? /// A unique name for the data quality ruleset. @@ -4825,8 +4938,9 @@ extension Glue { /// A target table associated with the data quality ruleset. public let targetTable: DataQualityTargetTable? - public init(clientToken: String? = nil, description: String? = nil, name: String, ruleset: String, tags: [String: String]? = nil, targetTable: DataQualityTargetTable? = nil) { + public init(clientToken: String? = nil, dataQualitySecurityConfiguration: String? = nil, description: String? = nil, name: String, ruleset: String, tags: [String: String]? = nil, targetTable: DataQualityTargetTable? 
= nil) { self.clientToken = clientToken + self.dataQualitySecurityConfiguration = dataQualitySecurityConfiguration self.description = description self.name = name self.ruleset = ruleset @@ -4838,6 +4952,9 @@ extension Glue { try self.validate(self.clientToken, name: "clientToken", parent: name, max: 255) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try self.validate(self.dataQualitySecurityConfiguration, name: "dataQualitySecurityConfiguration", parent: name, max: 255) + try self.validate(self.dataQualitySecurityConfiguration, name: "dataQualitySecurityConfiguration", parent: name, min: 1) + try self.validate(self.dataQualitySecurityConfiguration, name: "dataQualitySecurityConfiguration", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") try self.validate(self.description, name: "description", parent: name, max: 2048) try self.validate(self.description, name: "description", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") try self.validate(self.name, name: "name", parent: name, max: 255) @@ -4856,6 +4973,7 @@ extension Glue { private enum CodingKeys: String, CodingKey { case clientToken = "ClientToken" + case dataQualitySecurityConfiguration = "DataQualitySecurityConfiguration" case description = "Description" case name = "Name" case ruleset = "Ruleset" @@ -6559,6 +6677,8 @@ extension Glue { public let jobRunId: String? /// A list of DataQualityObservation objects representing the observations generated after evaluating the rules and analyzers. public let observations: [DataQualityObservation]? + /// The Profile ID for the data quality result. + public let profileId: String? /// A unique result ID for the data quality result. public let resultId: String? 
/// A list of DataQualityRuleResult objects representing the results for each rule. @@ -6572,7 +6692,7 @@ extension Glue { /// The date and time when this data quality run started. public let startedOn: Date? - public init(analyzerResults: [DataQualityAnalyzerResult]? = nil, completedOn: Date? = nil, dataSource: DataSource? = nil, evaluationContext: String? = nil, jobName: String? = nil, jobRunId: String? = nil, observations: [DataQualityObservation]? = nil, resultId: String? = nil, ruleResults: [DataQualityRuleResult]? = nil, rulesetEvaluationRunId: String? = nil, rulesetName: String? = nil, score: Double? = nil, startedOn: Date? = nil) { + public init(analyzerResults: [DataQualityAnalyzerResult]? = nil, completedOn: Date? = nil, dataSource: DataSource? = nil, evaluationContext: String? = nil, jobName: String? = nil, jobRunId: String? = nil, observations: [DataQualityObservation]? = nil, profileId: String? = nil, resultId: String? = nil, ruleResults: [DataQualityRuleResult]? = nil, rulesetEvaluationRunId: String? = nil, rulesetName: String? = nil, score: Double? = nil, startedOn: Date? = nil) { self.analyzerResults = analyzerResults self.completedOn = completedOn self.dataSource = dataSource @@ -6580,6 +6700,7 @@ extension Glue { self.jobName = jobName self.jobRunId = jobRunId self.observations = observations + self.profileId = profileId self.resultId = resultId self.ruleResults = ruleResults self.rulesetEvaluationRunId = rulesetEvaluationRunId @@ -6596,6 +6717,7 @@ extension Glue { case jobName = "JobName" case jobRunId = "JobRunId" case observations = "Observations" + case profileId = "ProfileId" case resultId = "ResultId" case ruleResults = "RuleResults" case rulesetEvaluationRunId = "RulesetEvaluationRunId" @@ -6728,6 +6850,8 @@ extension Glue { public let description: String? /// A map of metrics associated with the evaluation of the rule. public let evaluatedMetrics: [String: Double]? + /// The evaluated rule. + public let evaluatedRule: String? 
/// An evaluation message. public let evaluationMessage: String? /// The name of the data quality rule. @@ -6735,9 +6859,10 @@ extension Glue { /// A pass or fail status for the rule. public let result: DataQualityRuleResultStatus? - public init(description: String? = nil, evaluatedMetrics: [String: Double]? = nil, evaluationMessage: String? = nil, name: String? = nil, result: DataQualityRuleResultStatus? = nil) { + public init(description: String? = nil, evaluatedMetrics: [String: Double]? = nil, evaluatedRule: String? = nil, evaluationMessage: String? = nil, name: String? = nil, result: DataQualityRuleResultStatus? = nil) { self.description = description self.evaluatedMetrics = evaluatedMetrics + self.evaluatedRule = evaluatedRule self.evaluationMessage = evaluationMessage self.name = name self.result = result @@ -6746,6 +6871,7 @@ extension Glue { private enum CodingKeys: String, CodingKey { case description = "Description" case evaluatedMetrics = "EvaluatedMetrics" + case evaluatedRule = "EvaluatedRule" case evaluationMessage = "EvaluationMessage" case name = "Name" case result = "Result" @@ -7072,6 +7198,36 @@ extension Glue { } } + public struct DatapointInclusionAnnotation: AWSEncodableShape { + /// The inclusion annotation value to apply to the statistic. + public let inclusionAnnotation: InclusionAnnotationValue? + /// The ID of the data quality profile the statistic belongs to. + public let profileId: String? + /// The Statistic ID. + public let statisticId: String? + + public init(inclusionAnnotation: InclusionAnnotationValue? = nil, profileId: String? = nil, statisticId: String? 
= nil) { + self.inclusionAnnotation = inclusionAnnotation + self.profileId = profileId + self.statisticId = statisticId + } + + public func validate(name: String) throws { + try self.validate(self.profileId, name: "profileId", parent: name, max: 255) + try self.validate(self.profileId, name: "profileId", parent: name, min: 1) + try self.validate(self.profileId, name: "profileId", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try self.validate(self.statisticId, name: "statisticId", parent: name, max: 255) + try self.validate(self.statisticId, name: "statisticId", parent: name, min: 1) + try self.validate(self.statisticId, name: "statisticId", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + } + + private enum CodingKeys: String, CodingKey { + case inclusionAnnotation = "InclusionAnnotation" + case profileId = "ProfileId" + case statisticId = "StatisticId" + } + } + public struct Datatype: AWSEncodableShape & AWSDecodableShape { /// The datatype of the value. public let id: String @@ -9902,6 +10058,100 @@ extension Glue { } } + public struct GetDataQualityModelRequest: AWSEncodableShape { + /// The Profile ID. + public let profileId: String + /// The Statistic ID. + public let statisticId: String? + + public init(profileId: String, statisticId: String? 
= nil) { + self.profileId = profileId + self.statisticId = statisticId + } + + public func validate(name: String) throws { + try self.validate(self.profileId, name: "profileId", parent: name, max: 255) + try self.validate(self.profileId, name: "profileId", parent: name, min: 1) + try self.validate(self.profileId, name: "profileId", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try self.validate(self.statisticId, name: "statisticId", parent: name, max: 255) + try self.validate(self.statisticId, name: "statisticId", parent: name, min: 1) + try self.validate(self.statisticId, name: "statisticId", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + } + + private enum CodingKeys: String, CodingKey { + case profileId = "ProfileId" + case statisticId = "StatisticId" + } + } + + public struct GetDataQualityModelResponse: AWSDecodableShape { + /// The timestamp when the data quality model training completed. + public let completedOn: Date? + /// The training failure reason. + public let failureReason: String? + /// The timestamp when the data quality model training started. + public let startedOn: Date? + /// The training status of the data quality model. + public let status: DataQualityModelStatus? + + public init(completedOn: Date? = nil, failureReason: String? = nil, startedOn: Date? = nil, status: DataQualityModelStatus? = nil) { + self.completedOn = completedOn + self.failureReason = failureReason + self.startedOn = startedOn + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case completedOn = "CompletedOn" + case failureReason = "FailureReason" + case startedOn = "StartedOn" + case status = "Status" + } + } + + public struct GetDataQualityModelResultRequest: AWSEncodableShape { + /// The Profile ID. + public let profileId: String + /// The Statistic ID. 
+ public let statisticId: String + + public init(profileId: String, statisticId: String) { + self.profileId = profileId + self.statisticId = statisticId + } + + public func validate(name: String) throws { + try self.validate(self.profileId, name: "profileId", parent: name, max: 255) + try self.validate(self.profileId, name: "profileId", parent: name, min: 1) + try self.validate(self.profileId, name: "profileId", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try self.validate(self.statisticId, name: "statisticId", parent: name, max: 255) + try self.validate(self.statisticId, name: "statisticId", parent: name, min: 1) + try self.validate(self.statisticId, name: "statisticId", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + } + + private enum CodingKeys: String, CodingKey { + case profileId = "ProfileId" + case statisticId = "StatisticId" + } + } + + public struct GetDataQualityModelResultResponse: AWSDecodableShape { + /// The timestamp when the data quality model training completed. + public let completedOn: Date? + /// A list of StatisticModelResult + public let model: [StatisticModelResult]? + + public init(completedOn: Date? = nil, model: [StatisticModelResult]? = nil) { + self.completedOn = completedOn + self.model = model + } + + private enum CodingKeys: String, CodingKey { + case completedOn = "CompletedOn" + case model = "Model" + } + } + public struct GetDataQualityResultRequest: AWSEncodableShape { /// A unique result ID for the data quality result. public let resultId: String @@ -9936,6 +10186,8 @@ extension Glue { public let jobRunId: String? /// A list of DataQualityObservation objects representing the observations generated after evaluating the rules and analyzers. public let observations: [DataQualityObservation]? + /// The Profile ID for the data quality result. + public let profileId: String? /// A unique result ID for the data quality result. 
public let resultId: String? /// A list of DataQualityRuleResult objects representing the results for each rule. @@ -9949,7 +10201,7 @@ extension Glue { /// The date and time when the run for this data quality result started. public let startedOn: Date? - public init(analyzerResults: [DataQualityAnalyzerResult]? = nil, completedOn: Date? = nil, dataSource: DataSource? = nil, evaluationContext: String? = nil, jobName: String? = nil, jobRunId: String? = nil, observations: [DataQualityObservation]? = nil, resultId: String? = nil, ruleResults: [DataQualityRuleResult]? = nil, rulesetEvaluationRunId: String? = nil, rulesetName: String? = nil, score: Double? = nil, startedOn: Date? = nil) { + public init(analyzerResults: [DataQualityAnalyzerResult]? = nil, completedOn: Date? = nil, dataSource: DataSource? = nil, evaluationContext: String? = nil, jobName: String? = nil, jobRunId: String? = nil, observations: [DataQualityObservation]? = nil, profileId: String? = nil, resultId: String? = nil, ruleResults: [DataQualityRuleResult]? = nil, rulesetEvaluationRunId: String? = nil, rulesetName: String? = nil, score: Double? = nil, startedOn: Date? = nil) { self.analyzerResults = analyzerResults self.completedOn = completedOn self.dataSource = dataSource @@ -9957,6 +10209,7 @@ extension Glue { self.jobName = jobName self.jobRunId = jobRunId self.observations = observations + self.profileId = profileId self.resultId = resultId self.ruleResults = ruleResults self.rulesetEvaluationRunId = rulesetEvaluationRunId @@ -9973,6 +10226,7 @@ extension Glue { case jobName = "JobName" case jobRunId = "JobRunId" case observations = "Observations" + case profileId = "ProfileId" case resultId = "ResultId" case ruleResults = "RuleResults" case rulesetEvaluationRunId = "RulesetEvaluationRunId" @@ -10006,6 +10260,8 @@ extension Glue { public let completedOn: Date? /// The name of the ruleset that was created by the run. public let createdRulesetName: String? 
+ /// The name of the security configuration created with the data quality encryption option. + public let dataQualitySecurityConfiguration: String? /// The data source (an Glue table) associated with this run. public let dataSource: DataSource? /// The error strings that are associated with the run. @@ -10029,9 +10285,10 @@ extension Glue { /// The timeout for a run in minutes. This is the maximum time that a run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). public let timeout: Int? - public init(completedOn: Date? = nil, createdRulesetName: String? = nil, dataSource: DataSource? = nil, errorString: String? = nil, executionTime: Int? = nil, lastModifiedOn: Date? = nil, numberOfWorkers: Int? = nil, recommendedRuleset: String? = nil, role: String? = nil, runId: String? = nil, startedOn: Date? = nil, status: TaskStatusType? = nil, timeout: Int? = nil) { + public init(completedOn: Date? = nil, createdRulesetName: String? = nil, dataQualitySecurityConfiguration: String? = nil, dataSource: DataSource? = nil, errorString: String? = nil, executionTime: Int? = nil, lastModifiedOn: Date? = nil, numberOfWorkers: Int? = nil, recommendedRuleset: String? = nil, role: String? = nil, runId: String? = nil, startedOn: Date? = nil, status: TaskStatusType? = nil, timeout: Int? 
= nil) { self.completedOn = completedOn self.createdRulesetName = createdRulesetName + self.dataQualitySecurityConfiguration = dataQualitySecurityConfiguration self.dataSource = dataSource self.errorString = errorString self.executionTime = executionTime @@ -10048,6 +10305,7 @@ extension Glue { private enum CodingKeys: String, CodingKey { case completedOn = "CompletedOn" case createdRulesetName = "CreatedRulesetName" + case dataQualitySecurityConfiguration = "DataQualitySecurityConfiguration" case dataSource = "DataSource" case errorString = "ErrorString" case executionTime = "ExecutionTime" @@ -10172,6 +10430,8 @@ extension Glue { public struct GetDataQualityRulesetResponse: AWSDecodableShape { /// A timestamp. The time and date that this data quality ruleset was created. public let createdOn: Date? + /// The name of the security configuration created with the data quality encryption option. + public let dataQualitySecurityConfiguration: String? /// A description of the ruleset. public let description: String? /// A timestamp. The last point in time when this data quality ruleset was modified. @@ -10185,8 +10445,9 @@ extension Glue { /// The name and database name of the target table. public let targetTable: DataQualityTargetTable? - public init(createdOn: Date? = nil, description: String? = nil, lastModifiedOn: Date? = nil, name: String? = nil, recommendationRunId: String? = nil, ruleset: String? = nil, targetTable: DataQualityTargetTable? = nil) { + public init(createdOn: Date? = nil, dataQualitySecurityConfiguration: String? = nil, description: String? = nil, lastModifiedOn: Date? = nil, name: String? = nil, recommendationRunId: String? = nil, ruleset: String? = nil, targetTable: DataQualityTargetTable? 
= nil) { self.createdOn = createdOn + self.dataQualitySecurityConfiguration = dataQualitySecurityConfiguration self.description = description self.lastModifiedOn = lastModifiedOn self.name = name @@ -10197,6 +10458,7 @@ extension Glue { private enum CodingKeys: String, CodingKey { case createdOn = "CreatedOn" + case dataQualitySecurityConfiguration = "DataQualitySecurityConfiguration" case description = "Description" case lastModifiedOn = "LastModifiedOn" case name = "Name" @@ -11752,6 +12014,8 @@ extension Glue { public let catalogId: String? /// The name of the database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase. public let databaseName: String + /// Specifies whether to include status details related to a request to create or update an Glue Data Catalog view. + public let includeStatusDetails: Bool? /// The name of the table for which to retrieve the definition. For Hive compatibility, this name is entirely lowercase. public let name: String /// The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId. @@ -11759,9 +12023,10 @@ extension Glue { /// The transaction ID at which to read the table contents. public let transactionId: String? - public init(catalogId: String? = nil, databaseName: String, name: String, queryAsOfTime: Date? = nil, transactionId: String? = nil) { + public init(catalogId: String? = nil, databaseName: String, includeStatusDetails: Bool? = nil, name: String, queryAsOfTime: Date? = nil, transactionId: String? 
= nil) { self.catalogId = catalogId self.databaseName = databaseName + self.includeStatusDetails = includeStatusDetails self.name = name self.queryAsOfTime = queryAsOfTime self.transactionId = transactionId @@ -11785,6 +12050,7 @@ extension Glue { private enum CodingKeys: String, CodingKey { case catalogId = "CatalogId" case databaseName = "DatabaseName" + case includeStatusDetails = "IncludeStatusDetails" case name = "Name" case queryAsOfTime = "QueryAsOfTime" case transactionId = "TransactionId" @@ -11918,12 +12184,16 @@ extension Glue { } public struct GetTablesRequest: AWSEncodableShape { + /// Specifies the table fields returned by the GetTables call. This parameter doesn’t accept an empty list. The request must include NAME. The following are the valid combinations of values: NAME - Names of all tables in the database. NAME, TABLE_TYPE - Names of all tables and the table types. + public let attributesToGet: [TableAttributes]? /// The ID of the Data Catalog where the tables reside. If none is provided, the Amazon Web Services account ID is used by default. public let catalogId: String? /// The database in the catalog whose tables to list. For Hive compatibility, this name is entirely lowercase. public let databaseName: String /// A regular expression pattern. If present, only those tables whose names match the pattern are returned. public let expression: String? + /// Specifies whether to include status details related to a request to create or update an Glue Data Catalog view. + public let includeStatusDetails: Bool? /// The maximum number of tables to return in a single response. public let maxResults: Int? /// A continuation token, included if this is a continuation call. @@ -11933,10 +12203,12 @@ extension Glue { /// The transaction ID at which to read the table contents. public let transactionId: String? - public init(catalogId: String? = nil, databaseName: String, expression: String? = nil, maxResults: Int? = nil, nextToken: String? 
= nil, queryAsOfTime: Date? = nil, transactionId: String? = nil) { + public init(attributesToGet: [TableAttributes]? = nil, catalogId: String? = nil, databaseName: String, expression: String? = nil, includeStatusDetails: Bool? = nil, maxResults: Int? = nil, nextToken: String? = nil, queryAsOfTime: Date? = nil, transactionId: String? = nil) { + self.attributesToGet = attributesToGet self.catalogId = catalogId self.databaseName = databaseName self.expression = expression + self.includeStatusDetails = includeStatusDetails self.maxResults = maxResults self.nextToken = nextToken self.queryAsOfTime = queryAsOfTime @@ -11960,9 +12232,11 @@ extension Glue { } private enum CodingKeys: String, CodingKey { + case attributesToGet = "AttributesToGet" case catalogId = "CatalogId" case databaseName = "DatabaseName" case expression = "Expression" + case includeStatusDetails = "IncludeStatusDetails" case maxResults = "MaxResults" case nextToken = "NextToken" case queryAsOfTime = "QueryAsOfTime" @@ -14655,6 +14929,120 @@ extension Glue { } } + public struct ListDataQualityStatisticAnnotationsRequest: AWSEncodableShape { + /// The maximum number of results to return in this request. + public let maxResults: Int? + /// A pagination token to retrieve the next set of results. + public let nextToken: String? + /// The Profile ID. + public let profileId: String? + /// The Statistic ID. + public let statisticId: String? + /// A timestamp filter. + public let timestampFilter: TimestampFilter? + + public init(maxResults: Int? = nil, nextToken: String? = nil, profileId: String? = nil, statisticId: String? = nil, timestampFilter: TimestampFilter? 
= nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.profileId = profileId + self.statisticId = statisticId + self.timestampFilter = timestampFilter + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.profileId, name: "profileId", parent: name, max: 255) + try self.validate(self.profileId, name: "profileId", parent: name, min: 1) + try self.validate(self.profileId, name: "profileId", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try self.validate(self.statisticId, name: "statisticId", parent: name, max: 255) + try self.validate(self.statisticId, name: "statisticId", parent: name, min: 1) + try self.validate(self.statisticId, name: "statisticId", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case nextToken = "NextToken" + case profileId = "ProfileId" + case statisticId = "StatisticId" + case timestampFilter = "TimestampFilter" + } + } + + public struct ListDataQualityStatisticAnnotationsResponse: AWSDecodableShape { + /// A list of StatisticAnnotation applied to the Statistic + public let annotations: [StatisticAnnotation]? + /// A pagination token to retrieve the next set of results. + public let nextToken: String? + + public init(annotations: [StatisticAnnotation]? = nil, nextToken: String? = nil) { + self.annotations = annotations + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case annotations = "Annotations" + case nextToken = "NextToken" + } + } + + public struct ListDataQualityStatisticsRequest: AWSEncodableShape { + /// The maximum number of results to return in this request. + public let maxResults: Int? 
+ /// A pagination token to request the next page of results. + public let nextToken: String? + /// The Profile ID. + public let profileId: String? + /// The Statistic ID. + public let statisticId: String? + /// A timestamp filter. + public let timestampFilter: TimestampFilter? + + public init(maxResults: Int? = nil, nextToken: String? = nil, profileId: String? = nil, statisticId: String? = nil, timestampFilter: TimestampFilter? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.profileId = profileId + self.statisticId = statisticId + self.timestampFilter = timestampFilter + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.profileId, name: "profileId", parent: name, max: 255) + try self.validate(self.profileId, name: "profileId", parent: name, min: 1) + try self.validate(self.profileId, name: "profileId", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try self.validate(self.statisticId, name: "statisticId", parent: name, max: 255) + try self.validate(self.statisticId, name: "statisticId", parent: name, min: 1) + try self.validate(self.statisticId, name: "statisticId", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case nextToken = "NextToken" + case profileId = "ProfileId" + case statisticId = "StatisticId" + case timestampFilter = "TimestampFilter" + } + } + + public struct ListDataQualityStatisticsResponse: AWSDecodableShape { + /// A pagination token to request the next page of results. + public let nextToken: String? + /// A StatisticSummaryList. + public let statistics: [StatisticSummary]? + + public init(nextToken: String? = nil, statistics: [StatisticSummary]? 
= nil) { + self.nextToken = nextToken + self.statistics = statistics + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case statistics = "Statistics" + } + } + public struct ListDevEndpointsRequest: AWSEncodableShape { /// The maximum size of a list to return. public let maxResults: Int? @@ -15590,17 +15978,21 @@ extension Glue { public let metricValues: DataQualityMetricValues? /// A list of new data quality rules generated as part of the observation based on the data quality metric value. public let newRules: [String]? + /// The Statistic ID. + public let statisticId: String? - public init(metricName: String? = nil, metricValues: DataQualityMetricValues? = nil, newRules: [String]? = nil) { + public init(metricName: String? = nil, metricValues: DataQualityMetricValues? = nil, newRules: [String]? = nil, statisticId: String? = nil) { self.metricName = metricName self.metricValues = metricValues self.newRules = newRules + self.statisticId = statisticId } private enum CodingKeys: String, CodingKey { case metricName = "MetricName" case metricValues = "MetricValues" case newRules = "NewRules" + case statisticId = "StatisticId" } } @@ -16567,6 +16959,33 @@ extension Glue { public init() {} } + public struct PutDataQualityProfileAnnotationRequest: AWSEncodableShape { + /// The inclusion annotation value to apply to the profile. + public let inclusionAnnotation: InclusionAnnotationValue + /// The ID of the data quality monitoring profile to annotate. 
+ public let profileId: String + + public init(inclusionAnnotation: InclusionAnnotationValue, profileId: String) { + self.inclusionAnnotation = inclusionAnnotation + self.profileId = profileId + } + + public func validate(name: String) throws { + try self.validate(self.profileId, name: "profileId", parent: name, max: 255) + try self.validate(self.profileId, name: "profileId", parent: name, min: 1) + try self.validate(self.profileId, name: "profileId", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + } + + private enum CodingKeys: String, CodingKey { + case inclusionAnnotation = "InclusionAnnotation" + case profileId = "ProfileId" + } + } + + public struct PutDataQualityProfileAnnotationResponse: AWSDecodableShape { + public init() {} + } + public struct PutResourcePolicyRequest: AWSEncodableShape { /// If 'TRUE', indicates that you are using both methods to grant cross-account access to Data Catalog resources: By directly updating the resource policy with PutResourePolicy By using the Grant permissions command on the Amazon Web Services Management Console. Must be set to 'TRUE' if you have already used the Management Console to grant cross-account access, otherwise the call fails. Default is 'FALSE'. public let enableHybrid: EnableHybridValues? @@ -17410,6 +17829,23 @@ extension Glue { } } + public struct RunIdentifier: AWSDecodableShape { + /// The Job Run ID. + public let jobRunId: String? + /// The Run ID. + public let runId: String? + + public init(jobRunId: String? = nil, runId: String? = nil) { + self.jobRunId = jobRunId + self.runId = runId + } + + private enum CodingKeys: String, CodingKey { + case jobRunId = "JobRunId" + case runId = "RunId" + } + } + public struct RunMetrics: AWSDecodableShape { /// The duration of the job in hours. public let jobDurationInHour: String? @@ -18627,6 +19063,8 @@ extension Glue { public let catalogId: String? 
/// A list of key-value pairs, and a comparator used to filter the search results. Returns all entities matching the predicate. The Comparator member of the PropertyPredicate struct is used only for time fields, and can be omitted for other field types. Also, when comparing string values, such as when Key=Name, a fuzzy match algorithm is used. The Key field (for example, the value of the Name field) is split on certain punctuation characters, for example, -, :, #, etc. into tokens. Then each token is exact-match compared with the Value member of PropertyPredicate. For example, if Key=Name and Value=link, tables named customer-link and xx-link-yy are returned, but xxlinkyy is not returned. public let filters: [PropertyPredicate]? + /// Specifies whether to include status details related to a request to create or update an Glue Data Catalog view. + public let includeStatusDetails: Bool? /// The maximum number of tables to return in a single response. public let maxResults: Int? /// A continuation token, included if this is a continuation call. @@ -18638,9 +19076,10 @@ extension Glue { /// A list of criteria for sorting the results by a field name, in an ascending or descending order. public let sortCriteria: [SortCriterion]? - public init(catalogId: String? = nil, filters: [PropertyPredicate]? = nil, maxResults: Int? = nil, nextToken: String? = nil, resourceShareType: ResourceShareType? = nil, searchText: String? = nil, sortCriteria: [SortCriterion]? = nil) { + public init(catalogId: String? = nil, filters: [PropertyPredicate]? = nil, includeStatusDetails: Bool? = nil, maxResults: Int? = nil, nextToken: String? = nil, resourceShareType: ResourceShareType? = nil, searchText: String? = nil, sortCriteria: [SortCriterion]? 
= nil) { self.catalogId = catalogId self.filters = filters + self.includeStatusDetails = includeStatusDetails self.maxResults = maxResults self.nextToken = nextToken self.resourceShareType = resourceShareType @@ -18667,6 +19106,7 @@ extension Glue { private enum CodingKeys: String, CodingKey { case catalogId = "CatalogId" case filters = "Filters" + case includeStatusDetails = "IncludeStatusDetails" case maxResults = "MaxResults" case nextToken = "NextToken" case resourceShareType = "ResourceShareType" @@ -19639,6 +20079,8 @@ extension Glue { public let clientToken: String? /// A name for the ruleset. public let createdRulesetName: String? + /// The name of the security configuration created with the data quality encryption option. + public let dataQualitySecurityConfiguration: String? /// The data source (Glue table) associated with this run. public let dataSource: DataSource /// The number of G.1X workers to be used in the run. The default is 5. @@ -19648,9 +20090,10 @@ extension Glue { /// The timeout for a run in minutes. This is the maximum time that a run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). public let timeout: Int? - public init(clientToken: String? = nil, createdRulesetName: String? = nil, dataSource: DataSource, numberOfWorkers: Int? = nil, role: String, timeout: Int? = nil) { + public init(clientToken: String? = nil, createdRulesetName: String? = nil, dataQualitySecurityConfiguration: String? = nil, dataSource: DataSource, numberOfWorkers: Int? = nil, role: String, timeout: Int? 
= nil) { self.clientToken = clientToken self.createdRulesetName = createdRulesetName + self.dataQualitySecurityConfiguration = dataQualitySecurityConfiguration self.dataSource = dataSource self.numberOfWorkers = numberOfWorkers self.role = role @@ -19664,6 +20107,9 @@ extension Glue { try self.validate(self.createdRulesetName, name: "createdRulesetName", parent: name, max: 255) try self.validate(self.createdRulesetName, name: "createdRulesetName", parent: name, min: 1) try self.validate(self.createdRulesetName, name: "createdRulesetName", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try self.validate(self.dataQualitySecurityConfiguration, name: "dataQualitySecurityConfiguration", parent: name, max: 255) + try self.validate(self.dataQualitySecurityConfiguration, name: "dataQualitySecurityConfiguration", parent: name, min: 1) + try self.validate(self.dataQualitySecurityConfiguration, name: "dataQualitySecurityConfiguration", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") try self.dataSource.validate(name: "\(name).dataSource") try self.validate(self.timeout, name: "timeout", parent: name, min: 1) } @@ -19671,6 +20117,7 @@ extension Glue { private enum CodingKeys: String, CodingKey { case clientToken = "ClientToken" case createdRulesetName = "CreatedRulesetName" + case dataQualitySecurityConfiguration = "DataQualitySecurityConfiguration" case dataSource = "DataSource" case numberOfWorkers = "NumberOfWorkers" case role = "Role" @@ -20178,6 +20625,134 @@ extension Glue { } } + public struct StatisticAnnotation: AWSDecodableShape { + /// The inclusion annotation applied to the statistic. + public let inclusionAnnotation: TimestampedInclusionAnnotation? + /// The Profile ID. + public let profileId: String? + /// The Statistic ID. + public let statisticId: String? + /// The timestamp when the annotated statistic was recorded. + public let statisticRecordedOn: Date? 
+ + public init(inclusionAnnotation: TimestampedInclusionAnnotation? = nil, profileId: String? = nil, statisticId: String? = nil, statisticRecordedOn: Date? = nil) { + self.inclusionAnnotation = inclusionAnnotation + self.profileId = profileId + self.statisticId = statisticId + self.statisticRecordedOn = statisticRecordedOn + } + + private enum CodingKeys: String, CodingKey { + case inclusionAnnotation = "InclusionAnnotation" + case profileId = "ProfileId" + case statisticId = "StatisticId" + case statisticRecordedOn = "StatisticRecordedOn" + } + } + + public struct StatisticModelResult: AWSDecodableShape { + /// The actual value. + public let actualValue: Double? + /// The date. + public let date: Date? + /// The inclusion annotation. + public let inclusionAnnotation: InclusionAnnotationValue? + /// The lower bound. + public let lowerBound: Double? + /// The predicted value. + public let predictedValue: Double? + /// The upper bound. + public let upperBound: Double? + + public init(actualValue: Double? = nil, date: Date? = nil, inclusionAnnotation: InclusionAnnotationValue? = nil, lowerBound: Double? = nil, predictedValue: Double? = nil, upperBound: Double? = nil) { + self.actualValue = actualValue + self.date = date + self.inclusionAnnotation = inclusionAnnotation + self.lowerBound = lowerBound + self.predictedValue = predictedValue + self.upperBound = upperBound + } + + private enum CodingKeys: String, CodingKey { + case actualValue = "ActualValue" + case date = "Date" + case inclusionAnnotation = "InclusionAnnotation" + case lowerBound = "LowerBound" + case predictedValue = "PredictedValue" + case upperBound = "UpperBound" + } + } + + public struct StatisticSummary: AWSDecodableShape { + /// The list of columns referenced by the statistic. + public let columnsReferenced: [String]? + /// The value of the statistic. + public let doubleValue: Double? + /// The evaluation level of the statistic. Possible values: Dataset, Column, Multicolumn. 
+ public let evaluationLevel: StatisticEvaluationLevel? + /// The inclusion annotation for the statistic. + public let inclusionAnnotation: TimestampedInclusionAnnotation? + /// The Profile ID. + public let profileId: String? + /// The timestamp when the statistic was recorded. + public let recordedOn: Date? + /// The list of datasets referenced by the statistic. + public let referencedDatasets: [String]? + /// The Run Identifier + public let runIdentifier: RunIdentifier? + /// The Statistic ID. + public let statisticId: String? + /// The name of the statistic. + public let statisticName: String? + /// A StatisticPropertiesMap, which contains a NameString and DescriptionString + public let statisticProperties: [String: String]? + + public init(columnsReferenced: [String]? = nil, doubleValue: Double? = nil, evaluationLevel: StatisticEvaluationLevel? = nil, inclusionAnnotation: TimestampedInclusionAnnotation? = nil, profileId: String? = nil, recordedOn: Date? = nil, referencedDatasets: [String]? = nil, runIdentifier: RunIdentifier? = nil, statisticId: String? = nil, statisticName: String? = nil, statisticProperties: [String: String]? 
= nil) { + self.columnsReferenced = columnsReferenced + self.doubleValue = doubleValue + self.evaluationLevel = evaluationLevel + self.inclusionAnnotation = inclusionAnnotation + self.profileId = profileId + self.recordedOn = recordedOn + self.referencedDatasets = referencedDatasets + self.runIdentifier = runIdentifier + self.statisticId = statisticId + self.statisticName = statisticName + self.statisticProperties = statisticProperties + } + + private enum CodingKeys: String, CodingKey { + case columnsReferenced = "ColumnsReferenced" + case doubleValue = "DoubleValue" + case evaluationLevel = "EvaluationLevel" + case inclusionAnnotation = "InclusionAnnotation" + case profileId = "ProfileId" + case recordedOn = "RecordedOn" + case referencedDatasets = "ReferencedDatasets" + case runIdentifier = "RunIdentifier" + case statisticId = "StatisticId" + case statisticName = "StatisticName" + case statisticProperties = "StatisticProperties" + } + } + + public final class StatusDetails: AWSDecodableShape { + /// A Table object representing the requested changes. + public let requestedChange: Table? + /// A list of ViewValidation objects that contain information for an analytical engine to validate a view. + public let viewValidations: [ViewValidation]? + + public init(requestedChange: Table? = nil, viewValidations: [ViewValidation]? = nil) { + self.requestedChange = requestedChange + self.viewValidations = viewValidations + } + + private enum CodingKeys: String, CodingKey { + case requestedChange = "RequestedChange" + case viewValidations = "ViewValidations" + } + } + public struct StopColumnStatisticsTaskRunRequest: AWSEncodableShape { /// The name of the database where the table resides. public let databaseName: String @@ -20526,7 +21101,7 @@ extension Glue { } } - public struct Table: AWSDecodableShape { + public final class Table: AWSDecodableShape { /// The ID of the Data Catalog in which the table resides. public let catalogId: String? 
/// The person or entity who created the table. @@ -20557,6 +21132,7 @@ extension Glue { public let partitionKeys: [Column]? /// The retention time for this table. public let retention: Int? + public let status: TableStatus? /// A storage descriptor containing information about the physical storage of this table. public let storageDescriptor: StorageDescriptor? /// The type of this table. Glue will create tables with the EXTERNAL_TABLE type. Other services, such as Athena, may create tables with additional table types. Glue related table types: EXTERNAL_TABLE Hive compatible attribute - indicates a non-Hive managed table. GOVERNED Used by Lake Formation. The Glue Data Catalog understands GOVERNED. @@ -20574,7 +21150,7 @@ extension Glue { /// Included for Apache Hive compatibility. Not used in the normal course of Glue operations. If the table is a VIRTUAL_VIEW, certain Athena configuration encoded in base64. public let viewOriginalText: String? - public init(catalogId: String? = nil, createdBy: String? = nil, createTime: Date? = nil, databaseName: String? = nil, description: String? = nil, federatedTable: FederatedTable? = nil, isMultiDialectView: Bool? = nil, isRegisteredWithLakeFormation: Bool? = nil, lastAccessTime: Date? = nil, lastAnalyzedTime: Date? = nil, name: String, owner: String? = nil, parameters: [String: String]? = nil, partitionKeys: [Column]? = nil, retention: Int? = nil, storageDescriptor: StorageDescriptor? = nil, tableType: String? = nil, targetTable: TableIdentifier? = nil, updateTime: Date? = nil, versionId: String? = nil, viewDefinition: ViewDefinition? = nil, viewExpandedText: String? = nil, viewOriginalText: String? = nil) { + public init(catalogId: String? = nil, createdBy: String? = nil, createTime: Date? = nil, databaseName: String? = nil, description: String? = nil, federatedTable: FederatedTable? = nil, isMultiDialectView: Bool? = nil, isRegisteredWithLakeFormation: Bool? = nil, lastAccessTime: Date? = nil, lastAnalyzedTime: Date? 
= nil, name: String, owner: String? = nil, parameters: [String: String]? = nil, partitionKeys: [Column]? = nil, retention: Int? = nil, status: TableStatus? = nil, storageDescriptor: StorageDescriptor? = nil, tableType: String? = nil, targetTable: TableIdentifier? = nil, updateTime: Date? = nil, versionId: String? = nil, viewDefinition: ViewDefinition? = nil, viewExpandedText: String? = nil, viewOriginalText: String? = nil) { self.catalogId = catalogId self.createdBy = createdBy self.createTime = createTime @@ -20590,6 +21166,7 @@ extension Glue { self.parameters = parameters self.partitionKeys = partitionKeys self.retention = retention + self.status = status self.storageDescriptor = storageDescriptor self.tableType = tableType self.targetTable = targetTable @@ -20616,6 +21193,7 @@ extension Glue { case parameters = "Parameters" case partitionKeys = "PartitionKeys" case retention = "Retention" + case status = "Status" case storageDescriptor = "StorageDescriptor" case tableType = "TableType" case targetTable = "TargetTable" @@ -20848,6 +21426,47 @@ extension Glue { } } + public final class TableStatus: AWSDecodableShape { + /// Indicates which action was called on the table, currently only CREATE or UPDATE. + public let action: ResourceAction? + /// A StatusDetails object with information about the requested change. + public let details: StatusDetails? + /// An error that will only appear when the state is "FAILED". This is a parent level exception message, there may be different Errors for each dialect. + public let error: ErrorDetail? + /// The ARN of the user who requested the asynchronous change. + public let requestedBy: String? + /// An ISO 8601 formatted date string indicating the time that the change was initiated. + public let requestTime: Date? + /// A generic status for the change in progress, such as QUEUED, IN_PROGRESS, SUCCESS, or FAILED. + public let state: ResourceState? 
+ /// The ARN of the user to last manually alter the asynchronous change (requesting cancellation, etc). + public let updatedBy: String? + /// An ISO 8601 formatted date string indicating the time that the state was last updated. + public let updateTime: Date? + + public init(action: ResourceAction? = nil, details: StatusDetails? = nil, error: ErrorDetail? = nil, requestedBy: String? = nil, requestTime: Date? = nil, state: ResourceState? = nil, updatedBy: String? = nil, updateTime: Date? = nil) { + self.action = action + self.details = details + self.error = error + self.requestedBy = requestedBy + self.requestTime = requestTime + self.state = state + self.updatedBy = updatedBy + self.updateTime = updateTime + } + + private enum CodingKeys: String, CodingKey { + case action = "Action" + case details = "Details" + case error = "Error" + case requestedBy = "RequestedBy" + case requestTime = "RequestTime" + case state = "State" + case updatedBy = "UpdatedBy" + case updateTime = "UpdateTime" + } + } + public struct TableVersion: AWSDecodableShape { /// The table in question. public let table: Table? @@ -21039,6 +21658,40 @@ extension Glue { } } + public struct TimestampFilter: AWSEncodableShape { + /// The timestamp after which statistics should be included in the results. + public let recordedAfter: Date? + /// The timestamp before which statistics should be included in the results. + public let recordedBefore: Date? + + public init(recordedAfter: Date? = nil, recordedBefore: Date? = nil) { + self.recordedAfter = recordedAfter + self.recordedBefore = recordedBefore + } + + private enum CodingKeys: String, CodingKey { + case recordedAfter = "RecordedAfter" + case recordedBefore = "RecordedBefore" + } + } + + public struct TimestampedInclusionAnnotation: AWSDecodableShape { + /// The timestamp when the inclusion annotation was last modified. + public let lastModifiedOn: Date? + /// The inclusion annotation value. + public let value: InclusionAnnotationValue? 
+ + public init(lastModifiedOn: Date? = nil, value: InclusionAnnotationValue? = nil) { + self.lastModifiedOn = lastModifiedOn + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case lastModifiedOn = "LastModifiedOn" + case value = "Value" + } + } + public struct TransformConfigParameter: AWSEncodableShape & AWSDecodableShape { /// Specifies whether the parameter is optional or not in the config file of the dynamic transform. public let isOptional: Bool? @@ -23016,6 +23669,39 @@ extension Glue { } } + public struct ViewValidation: AWSDecodableShape { + /// The dialect of the query engine. + public let dialect: ViewDialect? + /// The version of the dialect of the query engine. For example, 3.0.0. + public let dialectVersion: String? + /// An error associated with the validation. + public let error: ErrorDetail? + /// The state of the validation. + public let state: ResourceState? + /// The time of the last update. + public let updateTime: Date? + /// The SELECT query that defines the view, as provided by the customer. + public let viewValidationText: String? + + public init(dialect: ViewDialect? = nil, dialectVersion: String? = nil, error: ErrorDetail? = nil, state: ResourceState? = nil, updateTime: Date? = nil, viewValidationText: String? = nil) { + self.dialect = dialect + self.dialectVersion = dialectVersion + self.error = error + self.state = state + self.updateTime = updateTime + self.viewValidationText = viewValidationText + } + + private enum CodingKeys: String, CodingKey { + case dialect = "Dialect" + case dialectVersion = "DialectVersion" + case error = "Error" + case state = "State" + case updateTime = "UpdateTime" + case viewValidationText = "ViewValidationText" + } + } + public struct Workflow: AWSDecodableShape { /// This structure indicates the details of the blueprint that this particular workflow is created from. public let blueprintDetails: BlueprintDetails? 
diff --git a/Sources/Soto/Services/IAM/IAM_api.swift b/Sources/Soto/Services/IAM/IAM_api.swift index 0827d7d7e7..3ee0a3801c 100644 --- a/Sources/Soto/Services/IAM/IAM_api.swift +++ b/Sources/Soto/Services/IAM/IAM_api.swift @@ -254,7 +254,7 @@ public struct IAM: AWSService { ) } - /// Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC). The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider. If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide. When you create the IAM OIDC provider, you specify the following: The URL of the OIDC identity provider (IdP) to trust A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider A list of tags that are attached to the specified IAM OIDC provider A list of thumbprints of one or more server certificates that the IdP uses You get all of this information from the OIDC IdP you want to use to access Amazon Web Services. Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation. 
These OIDC IdPs include Auth0, GitHub, GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users. + /// Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC). The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider. If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide. When you create the IAM OIDC provider, you specify the following: The URL of the OIDC identity provider (IdP) to trust A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider A list of tags that are attached to the specified IAM OIDC provider A list of thumbprints of one or more server certificates that the IdP uses You get all of this information from the OIDC IdP you want to use to access Amazon Web Services. Amazon Web Services secures communication with OIDC identity providers (IdPs) using our library of trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS) endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed by one of these trusted CAs, only then we secure communication using the thumbprints set in the IdP's configuration. 
The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users. @Sendable public func createOpenIDConnectProvider(_ input: CreateOpenIDConnectProviderRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateOpenIDConnectProviderResponse { return try await self.client.execute( @@ -1120,7 +1120,7 @@ public struct IAM: AWSService { ) } - /// Lists the account alias associated with the Amazon Web Services account (Note: you can have only one). For information about using an Amazon Web Services account alias, see Creating, deleting, and listing an Amazon Web Services account alias in the Amazon Web Services Sign-In User Guide. + /// Lists the account alias associated with the Amazon Web Services account (Note: you can have only one). For information about using an Amazon Web Services account alias, see Creating, deleting, and listing an Amazon Web Services account alias in the IAM User Guide. @Sendable public func listAccountAliases(_ input: ListAccountAliasesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAccountAliasesResponse { return try await self.client.execute( @@ -2004,7 +2004,7 @@ public struct IAM: AWSService { ) } - /// Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints. The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.) Typically, you need to update a thumbprint only when the identity provider certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated. 
Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation. These OIDC IdPs include Auth0, GitHub, GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. Trust for the OIDC provider is derived from the provider certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint operation to highly privileged users. + /// Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints. The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.) Typically, you need to update a thumbprint only when the identity provider certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated. Amazon Web Services secures communication with OIDC identity providers (IdPs) using our library of trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS) endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed by one of these trusted CAs, only then we secure communication using the thumbprints set in the IdP's configuration. Trust for the OIDC provider is derived from the provider certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint operation to highly privileged users. 
@Sendable public func updateOpenIDConnectProviderThumbprint(_ input: UpdateOpenIDConnectProviderThumbprintRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -2231,7 +2231,7 @@ extension IAM { ) } - /// Lists the account alias associated with the Amazon Web Services account (Note: you can have only one). For information about using an Amazon Web Services account alias, see Creating, deleting, and listing an Amazon Web Services account alias in the Amazon Web Services Sign-In User Guide. + /// Lists the account alias associated with the Amazon Web Services account (Note: you can have only one). For information about using an Amazon Web Services account alias, see Creating, deleting, and listing an Amazon Web Services account alias in the IAM User Guide. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/IAM/IAM_shapes.swift b/Sources/Soto/Services/IAM/IAM_shapes.swift index 3d0360b21b..39151ebc43 100644 --- a/Sources/Soto/Services/IAM/IAM_shapes.swift +++ b/Sources/Soto/Services/IAM/IAM_shapes.swift @@ -263,13 +263,13 @@ extension IAM { public struct AccessKeyLastUsed: AWSDecodableShape { /// The date and time, in ISO 8601 date-time format, when the access key was most recently used. This field is null in the following situations: The user does not have an access key. An access key exists but has not been used since IAM began tracking this information. There is no sign-in data associated with the user. - public let lastUsedDate: Date + public let lastUsedDate: Date? /// The Amazon Web Services Region where this access key was most recently used. The value for this field is "N/A" in the following situations: The user does not have an access key. An access key exists but has not been used since IAM began tracking this information. There is no sign-in data associated with the user. 
For more information about Amazon Web Services Regions, see Regions and endpoints in the Amazon Web Services General Reference. public let region: String /// The name of the Amazon Web Services service with which this access key was most recently used. The value of this field is "N/A" in the following situations: The user does not have an access key. An access key exists but has not been used since IAM started tracking this information. There is no sign-in data associated with the user. public let serviceName: String - public init(lastUsedDate: Date, region: String, serviceName: String) { + public init(lastUsedDate: Date? = nil, region: String, serviceName: String) { self.lastUsedDate = lastUsedDate self.region = region self.serviceName = serviceName diff --git a/Sources/Soto/Services/IVS/IVS_api.swift b/Sources/Soto/Services/IVS/IVS_api.swift index 7d6e719613..f8d0652ecf 100644 --- a/Sources/Soto/Services/IVS/IVS_api.swift +++ b/Sources/Soto/Services/IVS/IVS_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS IVS service. /// -/// Introduction The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both requests and responses, including errors. The API is an Amazon Web Services regional service. For a list of supported regions and Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the Amazon Web Services General Reference. All API request parameters and URLs are case sensitive. For a summary of notable documentation changes in each release, see Document History. Allowed Header Values Accept: application/json Accept-Encoding: gzip, deflate Content-Type: application/json Key Concepts Channel — Stores configuration data related to your live stream. You first create a channel and then use the channel’s stream key to start your live stream. 
Stream key — An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. Treat the stream key like a secret, since it allows anyone to stream to the channel. Playback key pair — Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token. Recording configuration — Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration. Playback restriction policy — Restricts playback by countries and/or origin sites. For more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming. Tagging A tag is a metadata label that you assign to an Amazon Web Services resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your Amazon Web Services resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording Configurations. At most 50 tags can be applied to a resource. Authentication versus Authorization Note the differences between these concepts: Authentication is about verifying identity. You need to be authenticated to sign Amazon IVS API requests. 
Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition, authorization is needed to view Amazon IVS private channels. (Private channels are channels that are enabled for "playback authorization.") Authentication All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS API directly, it’s your responsibility to sign the requests. You generate a signature using valid Amazon Web Services credentials that have permission to perform the requested action. For example, you must sign PutMetadata requests with a signature generated from a user account that has the ivs:PutMetadata permission. For more information: Authentication and generating signatures — See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions — See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference. Channel Endpoints CreateChannel — Creates a new channel and an associated stream key to start streaming. GetChannel — Gets the channel configuration for the specified channel ARN. BatchGetChannel — Performs GetChannel on multiple ARNs simultaneously. ListChannels — Gets summary information about all channels in your account, in the Amazon Web Services region where the API request is processed. This list can be filtered to match a specified name or recording-configuration ARN. Filters are mutually exclusive and cannot be used together. 
If you try to use both filters, you will get an error (409 Conflict Exception). UpdateChannel — Updates a channel's configuration. This does not affect an ongoing stream of this channel. You must stop and restart the stream for the changes to take effect. DeleteChannel — Deletes the specified channel. Playback Restriction Policy Endpoints CreatePlaybackRestrictionPolicy — Creates a new playback restriction policy, for constraining playback by countries and/or origins. DeletePlaybackRestrictionPolicy — Deletes the specified playback restriction policy GetPlaybackRestrictionPolicy — Gets the specified playback restriction policy. ListPlaybackRestrictionPolicies — Gets summary information about playback restriction policies. UpdatePlaybackRestrictionPolicy — Updates a specified playback restriction policy. Private Channel Endpoints For more information, see Setting Up Private Channels in the Amazon IVS User Guide. ImportPlaybackKeyPair — Imports the public portion of a new key pair and returns its arn and fingerprint. The privateKey can then be used to generate viewer authorization tokens, to grant viewers access to private channels (channels enabled for playback authorization). GetPlaybackKeyPair — Gets a specified playback authorization key pair and returns the arn and fingerprint. The privateKey held by the caller can be used to generate viewer authorization tokens, to grant viewers access to private channels. ListPlaybackKeyPairs — Gets summary information about playback key pairs. DeletePlaybackKeyPair — Deletes a specified authorization key pair. This invalidates future viewer tokens generated using the key pair’s privateKey. StartViewerSessionRevocation — Starts the process of revoking the viewer session associated with a specified channel ARN and viewer ID. Optionally, you can provide a version to revoke viewer sessions less than and including that version. 
BatchStartViewerSessionRevocation — Performs StartViewerSessionRevocation on multiple channel ARN and viewer ID pairs simultaneously. Recording Configuration Endpoints CreateRecordingConfiguration — Creates a new recording configuration, used to enable recording to Amazon S3. GetRecordingConfiguration — Gets the recording-configuration metadata for the specified ARN. ListRecordingConfigurations — Gets summary information about all recording configurations in your account, in the Amazon Web Services region where the API request is processed. DeleteRecordingConfiguration — Deletes the recording configuration for the specified ARN. Stream Endpoints GetStream — Gets information about the active (live) stream on a specified channel. GetStreamSession — Gets metadata on a specified stream. ListStreams — Gets summary information about live streams in your account, in the Amazon Web Services region where the API request is processed. ListStreamSessions — Gets a summary of current and previous streams for a specified channel in your account, in the AWS region where the API request is processed. StopStream — Disconnects the incoming RTMPS stream for the specified channel. Can be used in conjunction with DeleteStreamKey to prevent further streaming to a channel. PutMetadata — Inserts metadata into the active stream of the specified channel. At most 5 requests per second per channel are allowed, each with a maximum 1 KB payload. (If 5 TPS is not sufficient for your needs, we recommend batching your data into a single PutMetadata call.) At most 155 requests per second per account are allowed. Stream Key Endpoints CreateStreamKey — Creates a stream key, used to initiate a stream, for the specified channel ARN. GetStreamKey — Gets stream key information for the specified ARN. BatchGetStreamKey — Performs GetStreamKey on multiple ARNs simultaneously. ListStreamKeys — Gets summary information about stream keys for the specified channel. 
DeleteStreamKey — Deletes the stream key for the specified ARN, so it can no longer be used to stream. Amazon Web Services Tags Endpoints TagResource — Adds or updates tags for the Amazon Web Services resource with the specified ARN. UntagResource — Removes tags from the resource with the specified ARN. ListTagsForResource — Gets information about Amazon Web Services tags for the specified ARN. +/// Introduction The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both requests and responses, including errors. The API is an Amazon Web Services regional service. For a list of supported regions and Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the Amazon Web Services General Reference. All API request parameters and URLs are case sensitive. For a summary of notable documentation changes in each release, see Document History. Allowed Header Values Accept: application/json Accept-Encoding: gzip, deflate Content-Type: application/json Key Concepts Channel — Stores configuration data related to your live stream. You first create a channel and then use the channel’s stream key to start your live stream. Stream key — An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. Treat the stream key like a secret, since it allows anyone to stream to the channel. Playback key pair — Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token. Recording configuration — Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration. Playback restriction policy — Restricts playback by countries and/or origin sites. 
For more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming. Tagging A tag is a metadata label that you assign to an Amazon Web Services resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your Amazon Web Services resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording Configurations. At most 50 tags can be applied to a resource. Authentication versus Authorization Note the differences between these concepts: Authentication is about verifying identity. You need to be authenticated to sign Amazon IVS API requests. Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition, authorization is needed to view Amazon IVS private channels. (Private channels are channels that are enabled for "playback authorization.") Authentication All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS API directly, it’s your responsibility to sign the requests. You generate a signature using valid Amazon Web Services credentials that have permission to perform the requested action. 
For example, you must sign PutMetadata requests with a signature generated from a user account that has the ivs:PutMetadata permission. For more information: Authentication and generating signatures — See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions — See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference. public struct IVS: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/IVS/IVS_shapes.swift b/Sources/Soto/Services/IVS/IVS_shapes.swift index b96e0e4449..1a3136effb 100644 --- a/Sources/Soto/Services/IVS/IVS_shapes.swift +++ b/Sources/Soto/Services/IVS/IVS_shapes.swift @@ -461,9 +461,9 @@ extension IVS { try self.validate(self.name, name: "name", parent: name, max: 128) try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9-_]*$") try self.validate(self.playbackRestrictionPolicyArn, name: "playbackRestrictionPolicyArn", parent: name, max: 128) - try self.validate(self.playbackRestrictionPolicyArn, name: "playbackRestrictionPolicyArn", parent: name, pattern: "^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$") + try self.validate(self.playbackRestrictionPolicyArn, name: "playbackRestrictionPolicyArn", parent: name, pattern: "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$$") try self.validate(self.recordingConfigurationArn, name: "recordingConfigurationArn", parent: name, max: 128) - try self.validate(self.recordingConfigurationArn, name: "recordingConfigurationArn", parent: name, pattern: "^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+$") + 
try self.validate(self.recordingConfigurationArn, name: "recordingConfigurationArn", parent: name, pattern: "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+$$") try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -1085,9 +1085,9 @@ extension IVS { try self.validate(self.filterByName, name: "filterByName", parent: name, max: 128) try self.validate(self.filterByName, name: "filterByName", parent: name, pattern: "^[a-zA-Z0-9-_]*$") try self.validate(self.filterByPlaybackRestrictionPolicyArn, name: "filterByPlaybackRestrictionPolicyArn", parent: name, max: 128) - try self.validate(self.filterByPlaybackRestrictionPolicyArn, name: "filterByPlaybackRestrictionPolicyArn", parent: name, pattern: "^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$") + try self.validate(self.filterByPlaybackRestrictionPolicyArn, name: "filterByPlaybackRestrictionPolicyArn", parent: name, pattern: "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$$") try self.validate(self.filterByRecordingConfigurationArn, name: "filterByRecordingConfigurationArn", parent: name, max: 128) - try self.validate(self.filterByRecordingConfigurationArn, name: "filterByRecordingConfigurationArn", parent: name, pattern: "^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+$") + try self.validate(self.filterByRecordingConfigurationArn, name: "filterByRecordingConfigurationArn", parent: name, pattern: "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+$$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) @@ -2099,9 +2099,9 @@ extension IVS { try self.validate(self.name, name: "name", parent: name, max: 128) try 
self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9-_]*$") try self.validate(self.playbackRestrictionPolicyArn, name: "playbackRestrictionPolicyArn", parent: name, max: 128) - try self.validate(self.playbackRestrictionPolicyArn, name: "playbackRestrictionPolicyArn", parent: name, pattern: "^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$") + try self.validate(self.playbackRestrictionPolicyArn, name: "playbackRestrictionPolicyArn", parent: name, pattern: "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$$") try self.validate(self.recordingConfigurationArn, name: "recordingConfigurationArn", parent: name, max: 128) - try self.validate(self.recordingConfigurationArn, name: "recordingConfigurationArn", parent: name, pattern: "^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+$") + try self.validate(self.recordingConfigurationArn, name: "recordingConfigurationArn", parent: name, pattern: "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+$$") } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/Inspector2/Inspector2_shapes.swift b/Sources/Soto/Services/Inspector2/Inspector2_shapes.swift index 9e9f58d4d4..07f779714a 100644 --- a/Sources/Soto/Services/Inspector2/Inspector2_shapes.swift +++ b/Sources/Soto/Services/Inspector2/Inspector2_shapes.swift @@ -2858,7 +2858,9 @@ extension Inspector2 { try self.validate(self.accountIds, name: "accountIds", parent: name, max: 10000) try self.validate(self.accountIds, name: "accountIds", parent: name, min: 1) try self.targetResourceTags.forEach { + try validate($0.key, name: "targetResourceTags.key", parent: name, max: 128) try validate($0.key, name: "targetResourceTags.key", parent: name, min: 1) + try validate($0.key, name: "targetResourceTags.key", parent: name, pattern: "^[\\p{L}\\p{Z}\\p{N}_.:/=\\-@]*$") try validate($0.value, name: "targetResourceTags[\"\($0.key)\"]", parent: 
name, max: 5) try validate($0.value, name: "targetResourceTags[\"\($0.key)\"]", parent: name, min: 1) } @@ -7453,7 +7455,9 @@ extension Inspector2 { try self.validate(self.accountIds, name: "accountIds", parent: name, max: 10000) try self.validate(self.accountIds, name: "accountIds", parent: name, min: 1) try self.targetResourceTags?.forEach { + try validate($0.key, name: "targetResourceTags.key", parent: name, max: 128) try validate($0.key, name: "targetResourceTags.key", parent: name, min: 1) + try validate($0.key, name: "targetResourceTags.key", parent: name, pattern: "^[\\p{L}\\p{Z}\\p{N}_.:/=\\-@]*$") try validate($0.value, name: "targetResourceTags[\"\($0.key)\"]", parent: name, max: 5) try validate($0.value, name: "targetResourceTags[\"\($0.key)\"]", parent: name, min: 1) } diff --git a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift index fa9c18fad7..cd11eef4e9 100644 --- a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift +++ b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift @@ -3986,7 +3986,7 @@ extension IoTFleetWise { } } - public struct StructuredMessageListDefinition: AWSEncodableShape & AWSDecodableShape { + public final class StructuredMessageListDefinition: AWSEncodableShape & AWSDecodableShape { /// The capacity of the structured message list definition when the list type is FIXED_CAPACITY or DYNAMIC_BOUNDED_CAPACITY. public let capacity: Int? /// The type of list of the structured message list definition. diff --git a/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_api.swift b/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_api.swift index d24ec95aa9..932511eeb8 100644 --- a/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_api.swift +++ b/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_api.swift @@ -238,7 +238,7 @@ public struct IoTSiteWise: AWSService { ) } - /// Creates a custom composite model from specified property and hierarchy definitions. 
There are two types of custom composite models, inline and component-model-based. Use component-model-based custom composite models to define standard, reusable components. A component-model-based custom composite model consists of a name, a description, and the ID of the component model it references. A component-model-based custom composite model has no properties of its own; its referenced component model provides its associated properties to any created assets. For more information, see Custom composite models (Components) in the IoT SiteWise User Guide. Use inline custom composite models to organize the properties of an asset model. The properties of inline custom composite models are local to the asset model where they are included and can't be used to create multiple assets. To create a component-model-based model, specify the composedAssetModelId of an existing asset model with assetModelType of COMPONENT_MODEL. To create an inline model, specify the assetModelCompositeModelProperties and don't include an composedAssetModelId. + /// Creates a custom composite model from specified property and hierarchy definitions. There are two types of custom composite models, inline and component-model-based. Use component-model-based custom composite models to define standard, reusable components. A component-model-based custom composite model consists of a name, a description, and the ID of the component model it references. A component-model-based custom composite model has no properties of its own; its referenced component model provides its associated properties to any created assets. For more information, see Custom composite models (Components) in the IoT SiteWise User Guide. Use inline custom composite models to organize the properties of an asset model. The properties of inline custom composite models are local to the asset model where they are included and can't be used to create multiple assets. 
To create a component-model-based model, specify the composedAssetModelId of an existing asset model with assetModelType of COMPONENT_MODEL. To create an inline model, specify the assetModelCompositeModelProperties and don't include an composedAssetModelId. @Sendable public func createAssetModelCompositeModel(_ input: CreateAssetModelCompositeModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAssetModelCompositeModelResponse { return try await self.client.execute( @@ -252,7 +252,7 @@ public struct IoTSiteWise: AWSService { ) } - /// Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, see Create a bulk import job (CLI) in the Amazon Simple Storage Service User Guide. Before you create a bulk import job, you must enable IoT SiteWise warm tier or IoT SiteWise cold tier. For more information about how to configure storage settings, see PutStorageConfiguration. Bulk import is designed to store historical data to IoT SiteWise. It does not trigger computations or notifications on IoT SiteWise warm or cold tier storage. + /// Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, see Create a bulk import job (CLI) in the Amazon Simple Storage Service User Guide. Before you create a bulk import job, you must enable IoT SiteWise warm tier or IoT SiteWise cold tier. For more information about how to configure storage settings, see PutStorageConfiguration. Bulk import is designed to store historical data to IoT SiteWise. It does not trigger computations or notifications on IoT SiteWise warm or cold tier storage. @Sendable public func createBulkImportJob(_ input: CreateBulkImportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateBulkImportJobResponse { return try await self.client.execute( @@ -364,7 +364,7 @@ public struct IoTSiteWise: AWSService { ) } - /// Deletes a composite model. This action can't be undone. 
You must delete all assets created from a composite model before you can delete the model. Also, you can't delete a composite model if a parent asset model exists that contains a property formula expression that depends on the asset model that you want to delete. For more information, see Deleting assets and models in the IoT SiteWise User Guide. + /// Deletes a composite model. This action can't be undone. You must delete all assets created from a composite model before you can delete the model. Also, you can't delete a composite model if a parent asset model exists that contains a property formula expression that depends on the asset model that you want to delete. For more information, see Deleting assets and models in the IoT SiteWise User Guide. @Sendable public func deleteAssetModelCompositeModel(_ input: DeleteAssetModelCompositeModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAssetModelCompositeModelResponse { return try await self.client.execute( @@ -490,7 +490,7 @@ public struct IoTSiteWise: AWSService { ) } - /// Retrieves information about an asset composite model (also known as an asset component). An AssetCompositeModel is an instance of an AssetModelCompositeModel. If you want to see information about the model this is based on, call DescribeAssetModelCompositeModel. + /// Retrieves information about an asset composite model (also known as an asset component). An AssetCompositeModel is an instance of an AssetModelCompositeModel. If you want to see information about the model this is based on, call DescribeAssetModelCompositeModel. @Sendable public func describeAssetCompositeModel(_ input: DescribeAssetCompositeModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAssetCompositeModelResponse { return try await self.client.execute( @@ -1148,7 +1148,7 @@ public struct IoTSiteWise: AWSService { ) } - /// Updates an asset model and all of the assets that were created from the model. 
Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the IoT SiteWise User Guide. This operation overwrites the existing model with the provided model. To avoid deleting your asset model's properties or hierarchies, you must include their IDs and definitions in the updated asset model payload. For more information, see DescribeAssetModel. If you remove a property from an asset model, IoT SiteWise deletes all previous data for that property. If you remove a hierarchy definition from an asset model, IoT SiteWise disassociates every asset associated with that hierarchy. You can't change the type or data type of an existing property. + /// Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the IoT SiteWise User Guide. If you remove a property from an asset model, IoT SiteWise deletes all previous data for that property. You can’t change the type or data type of an existing property. To replace an existing asset model property with a new one with the same name, do the following: Submit an UpdateAssetModel request with the entire existing property removed. Submit a second UpdateAssetModel request that includes the new property. The new asset property will have the same name as the previous one and IoT SiteWise will generate a new unique id. 
@Sendable public func updateAssetModel(_ input: UpdateAssetModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateAssetModelResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_shapes.swift b/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_shapes.swift index 94bb7abe8b..fb100e69e8 100644 --- a/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_shapes.swift +++ b/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_shapes.swift @@ -119,6 +119,7 @@ extension IoTSiteWise { public enum CapabilitySyncStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case inSync = "IN_SYNC" + case notApplicable = "NOT_APPLICABLE" case outOfSync = "OUT_OF_SYNC" case syncFailed = "SYNC_FAILED" case unknown = "UNKNOWN" @@ -564,7 +565,7 @@ extension IoTSiteWise { public struct AssetCompositeModelSummary: AWSDecodableShape { /// A description of the composite model that this summary describes. public let description: String - /// An external ID to assign to the asset model. If the composite model is a derived composite model, or one nested inside a component model, you can only set the external ID using UpdateAssetModelCompositeModel and specifying the derived ID of the model or property from the created model it's a part of. + /// An external ID to assign to the asset model. If the composite model is a derived composite model, or one nested inside a component model, you can only set the external ID using UpdateAssetModelCompositeModel and specifying the derived ID of the model or property from the created model it's a part of. public let externalId: String? /// The ID of the composite model that this summary describes. public let id: String @@ -779,17 +780,17 @@ extension IoTSiteWise { } public struct AssetModelCompositeModelSummary: AWSDecodableShape { - /// The description of the the composite model that this summary describes.. 
+ /// The description of the composite model that this summary describes.. public let description: String? /// The external ID of a composite model on this asset model. For more information, see Using external IDs in the IoT SiteWise User Guide. public let externalId: String? - /// The ID of the the composite model that this summary describes.. + /// The ID of the composite model that this summary describes.. public let id: String - /// The name of the the composite model that this summary describes.. + /// The name of the composite model that this summary describes.. public let name: String /// The path that includes all the pieces that make up the composite model. public let path: [AssetModelCompositeModelPathSegment]? - /// The type of asset model. ASSET_MODEL – (default) An asset model that you can use to create assets. Can't be included as a component in another asset model. COMPONENT_MODEL – A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model. + /// The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY. public let type: String public init(description: String? = nil, externalId: String? = nil, id: String, name: String, path: [AssetModelCompositeModelPathSegment]? = nil, type: String) { @@ -2336,7 +2337,7 @@ extension IoTSiteWise { public struct CompositionRelationshipSummary: AWSDecodableShape { /// The ID of a composite model on this asset model. public let assetModelCompositeModelId: String - /// The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY. + /// The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY. public let assetModelCompositeModelType: String /// The ID of the asset model, in UUID format. 
public let assetModelId: String @@ -2452,13 +2453,13 @@ extension IoTSiteWise { public struct CreateAssetModelCompositeModelRequest: AWSEncodableShape { /// A description for the composite model. public let assetModelCompositeModelDescription: String? - /// An external ID to assign to the composite model. If the composite model is a derived composite model, or one nested inside a component model, you can only set the external ID using UpdateAssetModelCompositeModel and specifying the derived ID of the model or property from the created model it's a part of. + /// An external ID to assign to the composite model. If the composite model is a derived composite model, or one nested inside a component model, you can only set the external ID using UpdateAssetModelCompositeModel and specifying the derived ID of the model or property from the created model it's a part of. public let assetModelCompositeModelExternalId: String? - /// The ID of the composite model. IoT SiteWise automatically generates a unique ID for you, so this parameter is never required. However, if you prefer to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique. + /// The ID of the composite model. IoT SiteWise automatically generates a unique ID for you, so this parameter is never required. However, if you prefer to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique. public let assetModelCompositeModelId: String? - /// A unique, friendly name for the composite model. + /// A unique name for the composite model. public let assetModelCompositeModelName: String - /// The property definitions of the composite model. For more information, see . You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide. + /// The property definitions of the composite model. 
For more information, see Inline custom composite models in the IoT SiteWise User Guide. You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide. public let assetModelCompositeModelProperties: [AssetModelPropertyDefinition]? /// The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY. public let assetModelCompositeModelType: String @@ -2466,7 +2467,7 @@ extension IoTSiteWise { public let assetModelId: String /// A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. public let clientToken: String? - /// The ID of a composite model on this asset. + /// The ID of a component model which is reused to create this composite model. public let composedAssetModelId: String? /// The ID of the parent composite model in this asset model relationship. public let parentAssetModelCompositeModelId: String? @@ -2566,7 +2567,7 @@ extension IoTSiteWise { } public struct CreateAssetModelRequest: AWSEncodableShape { - /// The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information, see . + /// The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. 
When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information, see Creating custom composite models (Components) in the IoT SiteWise User Guide. public let assetModelCompositeModels: [AssetModelCompositeModelDefinition]? /// A description for the asset model. public let assetModelDescription: String? @@ -2576,7 +2577,7 @@ extension IoTSiteWise { public let assetModelHierarchies: [AssetModelHierarchyDefinition]? /// The ID to assign to the asset model, if desired. IoT SiteWise automatically generates a unique ID for you, so this parameter is never required. However, if you prefer to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique. public let assetModelId: String? - /// A unique, friendly name for the asset model. + /// A unique name for the asset model. public let assetModelName: String /// The property definitions of the asset model. For more information, see Asset properties in the IoT SiteWise User Guide. You can specify up to 200 properties per asset model. For more information, see Quotas in the IoT SiteWise User Guide. public let assetModelProperties: [AssetModelPropertyDefinition]? @@ -2901,7 +2902,7 @@ extension IoTSiteWise { } public struct CreateGatewayRequest: AWSEncodableShape { - /// A unique, friendly name for the gateway. + /// A unique name for the gateway. public let gatewayName: String /// The gateway's platform. You can only specify one platform in a gateway. public let gatewayPlatform: GatewayPlatform @@ -3716,7 +3717,7 @@ extension IoTSiteWise { public let actionDefinitions: [ActionDefinition]? /// A description for the composite model. public let assetCompositeModelDescription: String - /// An external ID to assign to the asset model. 
If the composite model is a component-based composite model, or one nested inside a component model, you can only set the external ID using UpdateAssetModelCompositeModel and specifying the derived ID of the model or property from the created model it's a part of. + /// An external ID to assign to the asset model. If the composite model is a component-based composite model, or one nested inside a component model, you can only set the external ID using UpdateAssetModelCompositeModel and specifying the derived ID of the model or property from the created model it's a part of. public let assetCompositeModelExternalId: String? /// The ID of a composite model on this asset. public let assetCompositeModelId: String @@ -3728,7 +3729,7 @@ extension IoTSiteWise { public let assetCompositeModelProperties: [AssetProperty] /// The list of composite model summaries. public let assetCompositeModelSummaries: [AssetCompositeModelSummary] - /// The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY. + /// The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY. public let assetCompositeModelType: String /// The ID of the asset, in UUID format. This ID uniquely identifies the asset within IoT SiteWise and can be used with other IoT SiteWise APIs. public let assetId: String @@ -3807,7 +3808,7 @@ extension IoTSiteWise { public let assetModelCompositeModelProperties: [AssetModelProperty] /// The list of composite model summaries for the composite model. public let assetModelCompositeModelSummaries: [AssetModelCompositeModelSummary] - /// The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY. + /// The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY. public let assetModelCompositeModelType: String /// The ID of the asset model, in UUID format. public let assetModelId: String @@ -5010,20 +5011,25 @@ extension IoTSiteWise { public let greengrass: Greengrass? 
/// A gateway that runs on IoT Greengrass V2. public let greengrassV2: GreengrassV2? + /// A SiteWise Edge gateway that runs on a Siemens Industrial Edge Device. + public let siemensIE: SiemensIE? - public init(greengrass: Greengrass? = nil, greengrassV2: GreengrassV2? = nil) { + public init(greengrass: Greengrass? = nil, greengrassV2: GreengrassV2? = nil, siemensIE: SiemensIE? = nil) { self.greengrass = greengrass self.greengrassV2 = greengrassV2 + self.siemensIE = siemensIE } public func validate(name: String) throws { try self.greengrass?.validate(name: "\(name).greengrass") try self.greengrassV2?.validate(name: "\(name).greengrassV2") + try self.siemensIE?.validate(name: "\(name).siemensIE") } private enum CodingKeys: String, CodingKey { case greengrass = "greengrass" case greengrassV2 = "greengrassV2" + case siemensIE = "siemensIE" } } @@ -5034,7 +5040,7 @@ extension IoTSiteWise { public let gatewayCapabilitySummaries: [GatewayCapabilitySummary]? /// The ID of the gateway device. public let gatewayId: String - /// The name of the asset. + /// The name of the gateway. public let gatewayName: String public let gatewayPlatform: GatewayPlatform? /// The date the gateway was last updated, in Unix epoch time. @@ -5402,7 +5408,7 @@ extension IoTSiteWise { } public struct Greengrass: AWSEncodableShape & AWSDecodableShape { - /// The ARN of the Greengrass group. For more information about how to find a group's ARN, see ListGroups and GetGroup in the IoT Greengrass API Reference. + /// The ARN of the Greengrass group. For more information about how to find a group's ARN, see ListGroups and GetGroup in the IoT Greengrass V1 API Reference. 
public let groupArn: String public init(groupArn: String) { @@ -5431,6 +5437,7 @@ extension IoTSiteWise { public func validate(name: String) throws { try self.validate(self.coreDeviceThingName, name: "coreDeviceThingName", parent: name, max: 128) try self.validate(self.coreDeviceThingName, name: "coreDeviceThingName", parent: name, min: 1) + try self.validate(self.coreDeviceThingName, name: "coreDeviceThingName", parent: name, pattern: "^[a-zA-Z0-9:_-]+$") } private enum CodingKeys: String, CodingKey { @@ -7211,6 +7218,25 @@ extension IoTSiteWise { } } + public struct SiemensIE: AWSEncodableShape & AWSDecodableShape { + /// The name of the IoT Thing for your SiteWise Edge gateway. + public let iotCoreThingName: String + + public init(iotCoreThingName: String) { + self.iotCoreThingName = iotCoreThingName + } + + public func validate(name: String) throws { + try self.validate(self.iotCoreThingName, name: "iotCoreThingName", parent: name, max: 128) + try self.validate(self.iotCoreThingName, name: "iotCoreThingName", parent: name, min: 1) + try self.validate(self.iotCoreThingName, name: "iotCoreThingName", parent: name, pattern: "^[a-zA-Z0-9:_-]+$") + } + + private enum CodingKeys: String, CodingKey { + case iotCoreThingName = "iotCoreThingName" + } + } + public struct TagResourceRequest: AWSEncodableShape { /// The ARN of the resource to tag. public let resourceArn: String @@ -7498,13 +7524,13 @@ extension IoTSiteWise { public struct UpdateAssetModelCompositeModelRequest: AWSEncodableShape { /// A description for the composite model. public let assetModelCompositeModelDescription: String? - /// An external ID to assign to the asset model. You can only set the external ID of the asset model if it wasn't set when it was created, or you're setting it to the exact same thing as when it was created. + /// An external ID to assign to the asset model. 
You can only set the external ID of the asset model if it wasn't set when it was created, or you're setting it to the exact same thing as when it was created. public let assetModelCompositeModelExternalId: String? /// The ID of a composite model on this asset model. public let assetModelCompositeModelId: String - /// A unique, friendly name for the composite model. + /// A unique name for the composite model. public let assetModelCompositeModelName: String - /// The property definitions of the composite model. For more information, see . You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide. + /// The property definitions of the composite model. For more information, see Inline custom composite models in the IoT SiteWise User Guide. You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide. public let assetModelCompositeModelProperties: [AssetModelProperty]? /// The ID of the asset model, in UUID format. public let assetModelId: String @@ -7583,7 +7609,7 @@ extension IoTSiteWise { } public struct UpdateAssetModelRequest: AWSEncodableShape { - /// The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information, see . + /// The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. 
Use composite models to define alarms on this asset model. When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information, see Creating custom composite models (Components) in the IoT SiteWise User Guide. public let assetModelCompositeModels: [AssetModelCompositeModel]? /// A description for the asset model. public let assetModelDescription: String? @@ -7593,7 +7619,7 @@ extension IoTSiteWise { public let assetModelHierarchies: [AssetModelHierarchy]? /// The ID of the asset model to update. This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one. For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide. public let assetModelId: String - /// A unique, friendly name for the asset model. + /// A unique name for the asset model. public let assetModelName: String /// The updated property definitions of the asset model. For more information, see Asset properties in the IoT SiteWise User Guide. You can specify up to 200 properties per asset model. For more information, see Quotas in the IoT SiteWise User Guide. public let assetModelProperties: [AssetModelProperty]? @@ -7885,7 +7911,7 @@ extension IoTSiteWise { } public func validate(name: String) throws { - try self.validate(self.capabilityConfiguration, name: "capabilityConfiguration", parent: name, max: 104857600) + try self.validate(self.capabilityConfiguration, name: "capabilityConfiguration", parent: name, max: 10000000) try self.validate(self.capabilityConfiguration, name: "capabilityConfiguration", parent: name, min: 1) try self.validate(self.capabilityNamespace, name: "capabilityNamespace", parent: name, max: 512) try self.validate(self.capabilityNamespace, name: "capabilityNamespace", parent: name, min: 1) @@ -7921,7 +7947,7 @@ extension IoTSiteWise { public struct UpdateGatewayRequest: AWSEncodableShape { /// The ID of the gateway to update. 
public let gatewayId: String - /// A unique, friendly name for the gateway. + /// A unique name for the gateway. public let gatewayName: String public init(gatewayId: String, gatewayName: String) { @@ -8118,7 +8144,7 @@ extension IoTSiteWise { } public struct VariableValue: AWSEncodableShape & AWSDecodableShape { - /// The ID of the hierarchy to query for the property ID. You can use the hierarchy's name instead of the hierarchy's ID. If the hierarchy has an external ID, you can specify externalId: followed by the external ID. For more information, see Using external IDs in the IoT SiteWise User Guide. You use a hierarchy ID instead of a model ID because you can have several hierarchies using the same model and therefore the same propertyId. For example, you might have separately grouped assets that come from the same asset model. For more information, see Asset hierarchies in the IoT SiteWise User Guide. + /// The ID of the hierarchy to query for the property ID. You can use the hierarchy's name instead of the hierarchy's ID. If the hierarchy has an external ID, you can specify externalId: followed by the external ID. For more information, see Using external IDs in the IoT SiteWise User Guide. You use a hierarchy ID instead of a model ID because you can have several hierarchies using the same model and therefore the same propertyId. For example, you might have separately grouped assets that come from the same asset model. For more information, see Asset hierarchies in the IoT SiteWise User Guide. public let hierarchyId: String? /// The ID of the property to use as the variable. You can use the property name if it's from the same asset model. If the property has an external ID, you can specify externalId: followed by the external ID. For more information, see Using external IDs in the IoT SiteWise User Guide. public let propertyId: String? @@ -8155,7 +8181,7 @@ extension IoTSiteWise { public let booleanValue: Bool? 
/// Asset property data of type double (floating point number). public let doubleValue: Double? - /// Asset property data of type integer (number that's greater than or equal to zero). + /// Asset property data of type integer (whole number). public let integerValue: Int? /// Asset property data of type string (sequence of characters). public let stringValue: String? diff --git a/Sources/Soto/Services/KinesisAnalytics/KinesisAnalytics_api.swift b/Sources/Soto/Services/KinesisAnalytics/KinesisAnalytics_api.swift index c9297d213e..38179adcbd 100644 --- a/Sources/Soto/Services/KinesisAnalytics/KinesisAnalytics_api.swift +++ b/Sources/Soto/Services/KinesisAnalytics/KinesisAnalytics_api.swift @@ -60,6 +60,7 @@ public struct KinesisAnalytics: AWSService { serviceProtocol: .json(version: "1.1"), apiVersion: "2015-08-14", endpoint: endpoint, + variantEndpoints: Self.variantEndpoints, errorType: KinesisAnalyticsErrorType.self, xmlNamespace: "http://analytics.kinesis.amazonaws.com/doc/2015-08-14", middleware: middleware, @@ -72,6 +73,19 @@ public struct KinesisAnalytics: AWSService { + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.fips]: .init(endpoints: [ + "ca-central-1": "kinesisanalytics-fips.ca-central-1.amazonaws.com", + "ca-west-1": "kinesisanalytics-fips.ca-west-1.amazonaws.com", + "us-east-1": "kinesisanalytics-fips.us-east-1.amazonaws.com", + "us-east-2": "kinesisanalytics-fips.us-east-2.amazonaws.com", + "us-gov-east-1": "kinesisanalytics-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisanalytics-fips.us-gov-west-1.amazonaws.com", + "us-west-1": "kinesisanalytics-fips.us-west-1.amazonaws.com", + "us-west-2": "kinesisanalytics-fips.us-west-2.amazonaws.com" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_api.swift b/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_api.swift index e42618f263..7f1a5042d0 100644 
--- a/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_api.swift +++ b/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_api.swift @@ -60,6 +60,7 @@ public struct KinesisAnalyticsV2: AWSService { serviceProtocol: .json(version: "1.1"), apiVersion: "2018-05-23", endpoint: endpoint, + variantEndpoints: Self.variantEndpoints, errorType: KinesisAnalyticsV2ErrorType.self, xmlNamespace: "http://analytics.kinesis.amazonaws.com/doc/2018-05-23", middleware: middleware, @@ -72,6 +73,19 @@ public struct KinesisAnalyticsV2: AWSService { + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.fips]: .init(endpoints: [ + "ca-central-1": "kinesisanalytics-fips.ca-central-1.amazonaws.com", + "ca-west-1": "kinesisanalytics-fips.ca-west-1.amazonaws.com", + "us-east-1": "kinesisanalytics-fips.us-east-1.amazonaws.com", + "us-east-2": "kinesisanalytics-fips.us-east-2.amazonaws.com", + "us-gov-east-1": "kinesisanalytics-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisanalytics-fips.us-gov-west-1.amazonaws.com", + "us-west-1": "kinesisanalytics-fips.us-west-1.amazonaws.com", + "us-west-2": "kinesisanalytics-fips.us-west-2.amazonaws.com" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_api.swift b/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_api.swift index 40de1d24c9..268bd392f7 100644 --- a/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_api.swift +++ b/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS KinesisVideoWebRTCStorage service. 
/// -/// +/// webrtc public struct KinesisVideoWebRTCStorage: AWSService { // MARK: Member variables @@ -87,7 +87,7 @@ public struct KinesisVideoWebRTCStorage: AWSService { // MARK: API Calls - /// Join the ongoing one way-video and/or multi-way audio WebRTC session as a video producing device for an input channel. If there’s no existing session for the channel, a new streaming session needs to be created, and the Amazon Resource Name (ARN) of the signaling channel must be provided. Currently for the SINGLE_MASTER type, a video producing device is able to ingest both audio and video media into a stream, while viewers can only ingest audio. Both a video producing device and viewers can join the session first, and wait for other participants. While participants are having peer to peer conversations through webRTC, the ingested media session will be stored into the Kinesis Video Stream. Multiple viewers are able to playback real-time media. Customers can also use existing Kinesis Video Streams features like HLS or DASH playback, Image generation, and more with ingested WebRTC media. Assume that only one video producing device client can be associated with a session for the channel. If more than one client joins the session of a specific channel as a video producing device, the most recent client request takes precedence. + /// Before using this API, you must call the GetSignalingChannelEndpoint API to request the WEBRTC endpoint. You then specify the endpoint and region in your JoinStorageSession API request. Join the ongoing one way-video and/or multi-way audio WebRTC session as a video producing device for an input channel. If there’s no existing session for the channel, a new streaming session needs to be created, and the Amazon Resource Name (ARN) of the signaling channel must be provided. Currently for the SINGLE_MASTER type, a video producing device is able to ingest both audio and video media into a stream. 
Only video producing devices can join the session and record media. Both audio and video tracks are currently required for WebRTC ingestion. Current requirements: Video track: H.264 Audio track: Opus The resulting ingested video in the Kinesis video stream will have the following parameters: H.264 video and AAC audio. Once a master participant has negotiated a connection through WebRTC, the ingested media session will be stored in the Kinesis video stream. Multiple viewers are then able to play back real-time media through our Playback APIs. You can also use existing Kinesis Video Streams features like HLS or DASH playback, image generation via GetImages, and more with ingested WebRTC media. S3 image delivery and notifications are not currently supported. Assume that only one video producing device client can be associated with a session for the channel. If more than one client joins the session of a specific channel as a video producing device, the most recent client request takes precedence. Additional information Idempotent - This API is not idempotent. Retry behavior - This is counted as a new API call. Concurrent calls - Concurrent calls are allowed. An offer is sent once per each call. @Sendable public func joinStorageSession(_ input: JoinStorageSessionInput, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -99,6 +99,19 @@ public struct KinesisVideoWebRTCStorage: AWSService { logger: logger ) } + + /// Join the ongoing one way-video and/or multi-way audio WebRTC session as a viewer for an input channel. If there’s no existing session for the channel, create a new streaming session and provide the Amazon Resource Name (ARN) of the signaling channel (channelArn) and client id (clientId). Currently for SINGLE_MASTER type, a video producing device is able to ingest both audio and video media into a stream, while viewers can only ingest audio. 
Both a video producing device and viewers can join a session first and wait for other participants. While participants are having peer to peer conversations through WebRTC, the ingested media session will be stored into the Kinesis Video Stream. Multiple viewers are able to playback real-time media. Customers can also use existing Kinesis Video Streams features like HLS or DASH playback, Image generation, and more with ingested WebRTC media. If there’s an existing session with the same clientId that's found in the join session request, the new request takes precedence. + @Sendable + public func joinStorageSessionAsViewer(_ input: JoinStorageSessionAsViewerInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "JoinStorageSessionAsViewer", + path: "/joinStorageSessionAsViewer", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } } extension KinesisVideoWebRTCStorage { diff --git a/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_shapes.swift b/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_shapes.swift index abcdaafe27..b5b8c68805 100644 --- a/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_shapes.swift +++ b/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_shapes.swift @@ -28,6 +28,30 @@ extension KinesisVideoWebRTCStorage { // MARK: Shapes + public struct JoinStorageSessionAsViewerInput: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the signaling channel. + public let channelArn: String + /// The unique identifier for the sender client. 
+ public let clientId: String + + public init(channelArn: String, clientId: String) { + self.channelArn = channelArn + self.clientId = clientId + } + + public func validate(name: String) throws { + try self.validate(self.channelArn, name: "channelArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*):kinesisvideo:[a-z0-9-]+:[0-9]+:[a-z]+/[a-zA-Z0-9_.-]+/[0-9]+$") + try self.validate(self.clientId, name: "clientId", parent: name, max: 256) + try self.validate(self.clientId, name: "clientId", parent: name, min: 1) + try self.validate(self.clientId, name: "clientId", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + } + + private enum CodingKeys: String, CodingKey { + case channelArn = "channelArn" + case clientId = "clientId" + } + } + public struct JoinStorageSessionInput: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the signaling channel. public let channelArn: String diff --git a/Sources/Soto/Services/LexModelsV2/LexModelsV2_api.swift b/Sources/Soto/Services/LexModelsV2/LexModelsV2_api.swift index 8b7f8cebe1..7cea91976e 100644 --- a/Sources/Soto/Services/LexModelsV2/LexModelsV2_api.swift +++ b/Sources/Soto/Services/LexModelsV2/LexModelsV2_api.swift @@ -228,7 +228,7 @@ public struct LexModelsV2: AWSService { ) } - /// Adds a new resource policy statement to a bot or bot alias. If a resource policy exists, the statement is added to the current resource policy. If a policy doesn't exist, a new policy is created. You can't create a resource policy statement that allows cross-account access. + /// Adds a new resource policy statement to a bot or bot alias. If a resource policy exists, the statement is added to the current resource policy. If a policy doesn't exist, a new policy is created. You can't create a resource policy statement that allows cross-account access. You need to add the CreateResourcePolicy or UpdateResourcePolicy action to the bot role in order to call the API. 
@Sendable public func createResourcePolicyStatement(_ input: CreateResourcePolicyStatementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateResourcePolicyStatementResponse { return try await self.client.execute( @@ -423,7 +423,7 @@ public struct LexModelsV2: AWSService { ) } - /// Deletes a policy statement from a resource policy. If you delete the last statement from a policy, the policy is deleted. If you specify a statement ID that doesn't exist in the policy, or if the bot or bot alias doesn't have a policy attached, Amazon Lex returns an exception. + /// Deletes a policy statement from a resource policy. If you delete the last statement from a policy, the policy is deleted. If you specify a statement ID that doesn't exist in the policy, or if the bot or bot alias doesn't have a policy attached, Amazon Lex returns an exception. You need to add the DeleteResourcePolicy or UpdateResourcePolicy action to the bot role in order to call the API. @Sendable public func deleteResourcePolicyStatement(_ input: DeleteResourcePolicyStatementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteResourcePolicyStatementResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/LexModelsV2/LexModelsV2_shapes.swift b/Sources/Soto/Services/LexModelsV2/LexModelsV2_shapes.swift index a3f535cb6d..d2ffb9c245 100644 --- a/Sources/Soto/Services/LexModelsV2/LexModelsV2_shapes.swift +++ b/Sources/Soto/Services/LexModelsV2/LexModelsV2_shapes.swift @@ -241,6 +241,12 @@ extension LexModelsV2 { public var description: String { return self.rawValue } } + public enum BedrockTraceStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum BotAliasReplicationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case available = "Available" 
case creating = "Creating" @@ -1858,12 +1864,42 @@ extension LexModelsV2 { } } + public struct BedrockGuardrailConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The unique guardrail id for the Bedrock guardrail configuration. + public let identifier: String + /// The guardrail version for the Bedrock guardrail configuration. + public let version: String + + public init(identifier: String, version: String) { + self.identifier = identifier + self.version = version + } + + public func validate(name: String) throws { + try self.validate(self.identifier, name: "identifier", parent: name, max: 2048) + try self.validate(self.identifier, name: "identifier", parent: name, min: 1) + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$") + try self.validate(self.version, name: "version", parent: name, pattern: "^(([1-9][0-9]{0,7})|(DRAFT))$") + } + + private enum CodingKeys: String, CodingKey { + case identifier = "identifier" + case version = "version" + } + } + public struct BedrockKnowledgeStoreConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The ARN of the knowledge base used. + /// The base ARN of the knowledge base used. public let bedrockKnowledgeBaseArn: String + /// Specifies whether to return an exact response, or to return an answer generated by the model, using the fields you specify from the database. + public let exactResponse: Bool? + /// Contains the names of the fields used for an exact response to the user. + public let exactResponseFields: BedrockKnowledgeStoreExactResponseFields? - public init(bedrockKnowledgeBaseArn: String) { + public init(bedrockKnowledgeBaseArn: String, exactResponse: Bool? = nil, exactResponseFields: BedrockKnowledgeStoreExactResponseFields? 
= nil) { self.bedrockKnowledgeBaseArn = bedrockKnowledgeBaseArn + self.exactResponse = exactResponse + self.exactResponseFields = exactResponseFields } public func validate(name: String) throws { @@ -1874,23 +1910,53 @@ extension LexModelsV2 { private enum CodingKeys: String, CodingKey { case bedrockKnowledgeBaseArn = "bedrockKnowledgeBaseArn" + case exactResponse = "exactResponse" + case exactResponseFields = "exactResponseFields" + } + } + + public struct BedrockKnowledgeStoreExactResponseFields: AWSEncodableShape & AWSDecodableShape { + /// The answer field used for an exact response from Bedrock Knowledge Store. + public let answerField: String? + + public init(answerField: String? = nil) { + self.answerField = answerField + } + + private enum CodingKeys: String, CodingKey { + case answerField = "answerField" } } public struct BedrockModelSpecification: AWSEncodableShape & AWSDecodableShape { + /// The custom prompt used in the Bedrock model specification details. + public let customPrompt: String? + /// The guardrail configuration in the Bedrock model specification details. + public let guardrail: BedrockGuardrailConfiguration? /// The ARN of the foundation model used in descriptive bot building. public let modelArn: String + /// The Bedrock trace status in the Bedrock model specification details. + public let traceStatus: BedrockTraceStatus? - public init(modelArn: String) { + public init(customPrompt: String? = nil, guardrail: BedrockGuardrailConfiguration? = nil, modelArn: String, traceStatus: BedrockTraceStatus? 
= nil) { + self.customPrompt = customPrompt + self.guardrail = guardrail self.modelArn = modelArn + self.traceStatus = traceStatus } public func validate(name: String) throws { + try self.validate(self.customPrompt, name: "customPrompt", parent: name, max: 4000) + try self.validate(self.customPrompt, name: "customPrompt", parent: name, min: 1) + try self.guardrail?.validate(name: "\(name).guardrail") try self.validate(self.modelArn, name: "modelArn", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model\\/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}$") } private enum CodingKeys: String, CodingKey { + case customPrompt = "customPrompt" + case guardrail = "guardrail" case modelArn = "modelArn" + case traceStatus = "traceStatus" } } diff --git a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift index 739f64c068..dcdc046660 100644 --- a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift +++ b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift @@ -10975,10 +10975,13 @@ extension MediaLive { } } - public struct MultiplexProgramPacketIdentifiersMap: AWSDecodableShape { + public struct MultiplexProgramPacketIdentifiersMap: AWSEncodableShape & AWSDecodableShape { + public let aribCaptionsPid: Int? public let audioPids: [Int]? public let dvbSubPids: [Int]? public let dvbTeletextPid: Int? + public let dvbTeletextPids: [Int]? + public let ecmPid: Int? public let etvPlatformPid: Int? public let etvSignalPid: Int? public let klvDataPids: [Int]? @@ -10987,13 +10990,17 @@ extension MediaLive { public let privateMetadataPid: Int? public let scte27Pids: [Int]? public let scte35Pid: Int? + public let smpte2038Pid: Int? public let timedMetadataPid: Int? public let videoPid: Int? - public init(audioPids: [Int]? = nil, dvbSubPids: [Int]? = nil, dvbTeletextPid: Int? = nil, etvPlatformPid: Int? = nil, etvSignalPid: Int? = nil, klvDataPids: [Int]? 
= nil, pcrPid: Int? = nil, pmtPid: Int? = nil, privateMetadataPid: Int? = nil, scte27Pids: [Int]? = nil, scte35Pid: Int? = nil, timedMetadataPid: Int? = nil, videoPid: Int? = nil) { + public init(aribCaptionsPid: Int? = nil, audioPids: [Int]? = nil, dvbSubPids: [Int]? = nil, dvbTeletextPid: Int? = nil, dvbTeletextPids: [Int]? = nil, ecmPid: Int? = nil, etvPlatformPid: Int? = nil, etvSignalPid: Int? = nil, klvDataPids: [Int]? = nil, pcrPid: Int? = nil, pmtPid: Int? = nil, privateMetadataPid: Int? = nil, scte27Pids: [Int]? = nil, scte35Pid: Int? = nil, smpte2038Pid: Int? = nil, timedMetadataPid: Int? = nil, videoPid: Int? = nil) { + self.aribCaptionsPid = aribCaptionsPid self.audioPids = audioPids self.dvbSubPids = dvbSubPids self.dvbTeletextPid = dvbTeletextPid + self.dvbTeletextPids = dvbTeletextPids + self.ecmPid = ecmPid self.etvPlatformPid = etvPlatformPid self.etvSignalPid = etvSignalPid self.klvDataPids = klvDataPids @@ -11002,14 +11009,18 @@ extension MediaLive { self.privateMetadataPid = privateMetadataPid self.scte27Pids = scte27Pids self.scte35Pid = scte35Pid + self.smpte2038Pid = smpte2038Pid self.timedMetadataPid = timedMetadataPid self.videoPid = videoPid } private enum CodingKeys: String, CodingKey { + case aribCaptionsPid = "aribCaptionsPid" case audioPids = "audioPids" case dvbSubPids = "dvbSubPids" case dvbTeletextPid = "dvbTeletextPid" + case dvbTeletextPids = "dvbTeletextPids" + case ecmPid = "ecmPid" case etvPlatformPid = "etvPlatformPid" case etvSignalPid = "etvSignalPid" case klvDataPids = "klvDataPids" @@ -11018,6 +11029,7 @@ extension MediaLive { case privateMetadataPid = "privateMetadataPid" case scte27Pids = "scte27Pids" case scte35Pid = "scte35Pid" + case smpte2038Pid = "smpte2038Pid" case timedMetadataPid = "timedMetadataPid" case videoPid = "videoPid" } @@ -14912,11 +14924,13 @@ extension MediaLive { public let multiplexSettings: MultiplexSettings? /// Name of the multiplex. public let name: String? 
+ public let packetIdentifiersMapping: [String: MultiplexProgramPacketIdentifiersMap]? - public init(multiplexId: String, multiplexSettings: MultiplexSettings? = nil, name: String? = nil) { + public init(multiplexId: String, multiplexSettings: MultiplexSettings? = nil, name: String? = nil, packetIdentifiersMapping: [String: MultiplexProgramPacketIdentifiersMap]? = nil) { self.multiplexId = multiplexId self.multiplexSettings = multiplexSettings self.name = name + self.packetIdentifiersMapping = packetIdentifiersMapping } public func encode(to encoder: Encoder) throws { @@ -14925,6 +14939,7 @@ extension MediaLive { request.encodePath(self.multiplexId, key: "MultiplexId") try container.encodeIfPresent(self.multiplexSettings, forKey: .multiplexSettings) try container.encodeIfPresent(self.name, forKey: .name) + try container.encodeIfPresent(self.packetIdentifiersMapping, forKey: .packetIdentifiersMapping) } public func validate(name: String) throws { @@ -14934,6 +14949,7 @@ extension MediaLive { private enum CodingKeys: String, CodingKey { case multiplexSettings = "multiplexSettings" case name = "name" + case packetIdentifiersMapping = "packetIdentifiersMapping" } } diff --git a/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift b/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift index cde7ee3ee6..ee58e0ad26 100644 --- a/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift +++ b/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift @@ -80,6 +80,7 @@ extension MediaPackageV2 { public enum DrmSystem: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case clearKeyAes128 = "CLEAR_KEY_AES_128" case fairplay = "FAIRPLAY" + case irdeto = "IRDETO" case playready = "PLAYREADY" case widevine = "WIDEVINE" public var description: String { return self.rawValue } diff --git a/Sources/Soto/Services/MedicalImaging/MedicalImaging_shapes.swift b/Sources/Soto/Services/MedicalImaging/MedicalImaging_shapes.swift 
index 924d9a811c..2a953da907 100644 --- a/Sources/Soto/Services/MedicalImaging/MedicalImaging_shapes.swift +++ b/Sources/Soto/Services/MedicalImaging/MedicalImaging_shapes.swift @@ -83,6 +83,37 @@ extension MedicalImaging { public var description: String { return self.rawValue } } + public enum MetadataUpdates: AWSEncodableShape, Sendable { + /// The object containing removableAttributes and updatableAttributes. + case dicomUpdates(DICOMUpdates) + /// Specifies the previous image set version ID to revert the current image set back to. You must provide either revertToVersionId or DICOMUpdates in your request. A ValidationException error is thrown if both parameters are provided at the same time. + case revertToVersionId(String) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .dicomUpdates(let value): + try container.encode(value, forKey: .dicomUpdates) + case .revertToVersionId(let value): + try container.encode(value, forKey: .revertToVersionId) + } + } + + public func validate(name: String) throws { + switch self { + case .dicomUpdates(let value): + try value.validate(name: "\(name).dicomUpdates") + case .revertToVersionId(let value): + try self.validate(value, name: "revertToVersionId", parent: name, pattern: "^\\d+$") + } + } + + private enum CodingKeys: String, CodingKey { + case dicomUpdates = "DICOMUpdates" + case revertToVersionId = "revertToVersionId" + } + } + public enum SearchByAttributeValue: AWSEncodableShape, Sendable { /// The created at time of the image set provided for search. 
case createdAt(Date) @@ -126,19 +157,19 @@ extension MedicalImaging { public func validate(name: String) throws { switch self { case .dicomAccessionNumber(let value): - try self.validate(value, name: "dicomAccessionNumber", parent: name, max: 16) + try self.validate(value, name: "dicomAccessionNumber", parent: name, max: 256) case .dicomPatientId(let value): - try self.validate(value, name: "dicomPatientId", parent: name, max: 64) + try self.validate(value, name: "dicomPatientId", parent: name, max: 256) case .dicomSeriesInstanceUID(let value): - try self.validate(value, name: "dicomSeriesInstanceUID", parent: name, max: 64) - try self.validate(value, name: "dicomSeriesInstanceUID", parent: name, pattern: "^(?:[1-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*$") + try self.validate(value, name: "dicomSeriesInstanceUID", parent: name, max: 256) + try self.validate(value, name: "dicomSeriesInstanceUID", parent: name, pattern: "^(?:[0-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*$") case .dicomStudyDateAndTime(let value): try value.validate(name: "\(name).dicomStudyDateAndTime") case .dicomStudyId(let value): try self.validate(value, name: "dicomStudyId", parent: name, max: 16) case .dicomStudyInstanceUID(let value): - try self.validate(value, name: "dicomStudyInstanceUID", parent: name, max: 64) - try self.validate(value, name: "dicomStudyInstanceUID", parent: name, pattern: "^(?:[1-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*$") + try self.validate(value, name: "dicomStudyInstanceUID", parent: name, max: 256) + try self.validate(value, name: "dicomStudyInstanceUID", parent: name, pattern: "^(?:[0-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*$") default: break } @@ -244,12 +275,15 @@ extension MedicalImaging { public let copyImageSetInformation: CopyImageSetInformation /// The data store identifier. public let datastoreId: String + /// Setting this flag will force the CopyImageSet operation, even if Patient, Study, or Series level metadata are mismatched across the sourceImageSet and destinationImageSet. 
+ public let force: Bool? /// The source image set identifier. public let sourceImageSetId: String - public init(copyImageSetInformation: CopyImageSetInformation, datastoreId: String, sourceImageSetId: String) { + public init(copyImageSetInformation: CopyImageSetInformation, datastoreId: String, force: Bool? = nil, sourceImageSetId: String) { self.copyImageSetInformation = copyImageSetInformation self.datastoreId = datastoreId + self.force = force self.sourceImageSetId = sourceImageSetId } @@ -258,6 +292,7 @@ extension MedicalImaging { var container = encoder.singleValueContainer() try container.encode(self.copyImageSetInformation) request.encodePath(self.datastoreId, key: "datastoreId") + request.encodeQuery(self.force, key: "force") request.encodePath(self.sourceImageSetId, key: "sourceImageSetId") } @@ -292,18 +327,23 @@ extension MedicalImaging { } public struct CopySourceImageSetInformation: AWSEncodableShape { + /// Contains MetadataCopies structure and wraps information related to specific copy use cases. For example, when copying subsets. + public let dicomCopies: MetadataCopies? /// The latest version identifier for the source image set. public let latestVersionId: String - public init(latestVersionId: String) { + public init(dicomCopies: MetadataCopies? = nil, latestVersionId: String) { + self.dicomCopies = dicomCopies self.latestVersionId = latestVersionId } public func validate(name: String) throws { + try self.dicomCopies?.validate(name: "\(name).dicomCopies") try self.validate(self.latestVersionId, name: "latestVersionId", parent: name, pattern: "^\\d+$") } private enum CodingKeys: String, CodingKey { + case dicomCopies = "DICOMCopies" case latestVersionId = "latestVersionId" } } @@ -1006,12 +1046,14 @@ extension MedicalImaging { public let imageSetWorkflowStatus: ImageSetWorkflowStatus? /// The error message thrown if an image set action fails. public let message: String? 
+ /// This object contains the details of any overrides used while creating a specific image set version. If an image set was copied or updated using the force flag, this object will contain the forced flag. + public let overrides: Overrides? /// The timestamp when image set properties were updated. public let updatedAt: Date? /// The image set version identifier. public let versionId: String - public init(createdAt: Date? = nil, datastoreId: String, deletedAt: Date? = nil, imageSetArn: String? = nil, imageSetId: String, imageSetState: ImageSetState, imageSetWorkflowStatus: ImageSetWorkflowStatus? = nil, message: String? = nil, updatedAt: Date? = nil, versionId: String) { + public init(createdAt: Date? = nil, datastoreId: String, deletedAt: Date? = nil, imageSetArn: String? = nil, imageSetId: String, imageSetState: ImageSetState, imageSetWorkflowStatus: ImageSetWorkflowStatus? = nil, message: String? = nil, overrides: Overrides? = nil, updatedAt: Date? = nil, versionId: String) { self.createdAt = createdAt self.datastoreId = datastoreId self.deletedAt = deletedAt @@ -1020,6 +1062,7 @@ extension MedicalImaging { self.imageSetState = imageSetState self.imageSetWorkflowStatus = imageSetWorkflowStatus self.message = message + self.overrides = overrides self.updatedAt = updatedAt self.versionId = versionId } @@ -1033,6 +1076,7 @@ extension MedicalImaging { case imageSetState = "imageSetState" case imageSetWorkflowStatus = "imageSetWorkflowStatus" case message = "message" + case overrides = "overrides" case updatedAt = "updatedAt" case versionId = "versionId" } @@ -1068,18 +1112,21 @@ extension MedicalImaging { public let imageSetWorkflowStatus: ImageSetWorkflowStatus? /// The error message thrown if an image set action fails. public let message: String? + /// Contains details on overrides used when creating the returned version of an image set. For example, if forced exists, the forced flag was used when creating the image set. + public let overrides: Overrides? 
/// The timestamp when the image set properties were updated. public let updatedAt: Date? /// The image set version identifier. public let versionId: String - public init(createdAt: Date? = nil, deletedAt: Date? = nil, imageSetId: String, imageSetState: ImageSetState, imageSetWorkflowStatus: ImageSetWorkflowStatus? = nil, message: String? = nil, updatedAt: Date? = nil, versionId: String) { + public init(createdAt: Date? = nil, deletedAt: Date? = nil, imageSetId: String, imageSetState: ImageSetState, imageSetWorkflowStatus: ImageSetWorkflowStatus? = nil, message: String? = nil, overrides: Overrides? = nil, updatedAt: Date? = nil, versionId: String) { self.createdAt = createdAt self.deletedAt = deletedAt self.imageSetId = imageSetId self.imageSetState = imageSetState self.imageSetWorkflowStatus = imageSetWorkflowStatus self.message = message + self.overrides = overrides self.updatedAt = updatedAt self.versionId = versionId } @@ -1091,6 +1138,7 @@ extension MedicalImaging { case imageSetState = "imageSetState" case imageSetWorkflowStatus = "ImageSetWorkflowStatus" case message = "message" + case overrides = "overrides" case updatedAt = "updatedAt" case versionId = "versionId" } @@ -1314,6 +1362,37 @@ extension MedicalImaging { } } + public struct MetadataCopies: AWSEncodableShape { + /// The JSON string used to specify a subset of SOP Instances to copy from source to destination image set. 
+ public let copiableAttributes: String + + public init(copiableAttributes: String) { + self.copiableAttributes = copiableAttributes + } + + public func validate(name: String) throws { + try self.validate(self.copiableAttributes, name: "copiableAttributes", parent: name, max: 260000) + try self.validate(self.copiableAttributes, name: "copiableAttributes", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case copiableAttributes = "copiableAttributes" + } + } + + public struct Overrides: AWSDecodableShape { + /// Setting this flag will force the CopyImageSet and UpdateImageSetMetadata operations, even if Patient, Study, or Series level metadata are mismatched. + public let forced: Bool? + + public init(forced: Bool? = nil) { + self.forced = forced + } + + private enum CodingKeys: String, CodingKey { + case forced = "forced" + } + } + public struct SearchCriteria: AWSEncodableShape { /// The filters for the search criteria. public let filters: [SearchFilter]? @@ -1605,6 +1684,8 @@ extension MedicalImaging { public struct UpdateImageSetMetadataRequest: AWSEncodableShape { /// The data store identifier. public let datastoreId: String + /// Setting this flag will force the UpdateImageSetMetadata operation for the following attributes: Tag.StudyInstanceUID, Tag.SeriesInstanceUID, Tag.SOPInstanceUID, and Tag.StudyID Adding, removing, or updating private tags for an individual SOP Instance + public let force: Bool? /// The image set identifier. public let imageSetId: String /// The latest image set version identifier. @@ -1612,8 +1693,9 @@ extension MedicalImaging { /// Update image set metadata updates. public let updateImageSetMetadataUpdates: MetadataUpdates - public init(datastoreId: String, imageSetId: String, latestVersionId: String, updateImageSetMetadataUpdates: MetadataUpdates) { + public init(datastoreId: String, force: Bool? 
= nil, imageSetId: String, latestVersionId: String, updateImageSetMetadataUpdates: MetadataUpdates) { self.datastoreId = datastoreId + self.force = force self.imageSetId = imageSetId self.latestVersionId = latestVersionId self.updateImageSetMetadataUpdates = updateImageSetMetadataUpdates @@ -1623,6 +1705,7 @@ extension MedicalImaging { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.singleValueContainer() request.encodePath(self.datastoreId, key: "datastoreId") + request.encodeQuery(self.force, key: "force") request.encodePath(self.imageSetId, key: "imageSetId") request.encodeQuery(self.latestVersionId, key: "latestVersion") try container.encode(self.updateImageSetMetadataUpdates) @@ -1678,23 +1761,6 @@ extension MedicalImaging { case updatedAt = "updatedAt" } } - - public struct MetadataUpdates: AWSEncodableShape { - /// The object containing removableAttributes and updatableAttributes. - public let dicomUpdates: DICOMUpdates? - - public init(dicomUpdates: DICOMUpdates? = nil) { - self.dicomUpdates = dicomUpdates - } - - public func validate(name: String) throws { - try self.dicomUpdates?.validate(name: "\(name).dicomUpdates") - } - - private enum CodingKeys: String, CodingKey { - case dicomUpdates = "DICOMUpdates" - } - } } // MARK: - Errors diff --git a/Sources/Soto/Services/MemoryDB/MemoryDB_api.swift b/Sources/Soto/Services/MemoryDB/MemoryDB_api.swift index 41b9511021..73b1a2bb99 100644 --- a/Sources/Soto/Services/MemoryDB/MemoryDB_api.swift +++ b/Sources/Soto/Services/MemoryDB/MemoryDB_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS MemoryDB service. /// -/// MemoryDB for Redis is a fully managed, Redis-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. 
It is compatible with Redis, a popular open source data store, enabling you to leverage Redis’ flexible and friendly data structures, APIs, and commands. +/// MemoryDB is a fully managed, Redis OSS-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis OSS, a popular open source data store, enabling you to leverage Redis OSS’ flexible and friendly data structures, APIs, and commands. public struct MemoryDB: AWSService { // MARK: Member variables @@ -198,7 +198,7 @@ public struct MemoryDB: AWSService { ) } - /// Deletes a cluster. It also deletes all associated nodes and node endpoints + /// Deletes a cluster. It also deletes all associated nodes and node endpoints CreateSnapshot permission is required to create a final snapshot. Without this permission, the API call will fail with an Access Denied exception. @Sendable public func deleteCluster(_ input: DeleteClusterRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteClusterResponse { return try await self.client.execute( @@ -289,7 +289,7 @@ public struct MemoryDB: AWSService { ) } - /// Returns a list of the available Redis engine versions. + /// Returns a list of the available Redis OSS engine versions. @Sendable public func describeEngineVersions(_ input: DescribeEngineVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeEngineVersionsResponse { return try await self.client.execute( @@ -629,7 +629,7 @@ extension MemoryDB { ) } - /// Returns a list of the available Redis engine versions. + /// Returns a list of the available Redis OSS engine versions. /// Return PaginatorSequence for operation. 
/// /// - Parameters: diff --git a/Sources/Soto/Services/MemoryDB/MemoryDB_shapes.swift b/Sources/Soto/Services/MemoryDB/MemoryDB_shapes.swift index 260c927ba8..cbebaa8a95 100644 --- a/Sources/Soto/Services/MemoryDB/MemoryDB_shapes.swift +++ b/Sources/Soto/Services/MemoryDB/MemoryDB_shapes.swift @@ -247,9 +247,9 @@ extension MemoryDB { public let dataTiering: DataTieringStatus? /// A description of the cluster public let description: String? - /// The Redis engine patch version used by the cluster + /// The Redis OSS engine patch version used by the cluster public let enginePatchVersion: String? - /// The Redis engine version used by the cluster + /// The Redis OSS engine version used by the cluster public let engineVersion: String? /// The ID of the KMS key used to encrypt the cluster public let kmsKeyId: String? @@ -348,7 +348,7 @@ extension MemoryDB { public struct ClusterConfiguration: AWSDecodableShape { /// The description of the cluster configuration public let description: String? - /// The Redis engine version used by the cluster + /// The Redis OSS engine version used by the cluster public let engineVersion: String? /// The specified maintenance window for the cluster public let maintenanceWindow: String? @@ -534,7 +534,7 @@ extension MemoryDB { public let dataTiering: Bool? /// An optional description of the cluster. public let description: String? - /// The version number of the Redis engine to be used for the cluster. + /// The version number of the Redis OSS engine to be used for the cluster. public let engineVersion: String? /// The ID of the KMS key used to encrypt the cluster. public let kmsKeyId: String? @@ -1059,7 +1059,7 @@ extension MemoryDB { public struct DescribeEngineVersionsRequest: AWSEncodableShape { /// If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned. public let defaultOnly: Bool? 
- /// The Redis engine version + /// The Redis OSS engine version public let engineVersion: String? /// The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. public let maxResults: Int? diff --git a/Sources/Soto/Services/Mobile/Mobile_api.swift b/Sources/Soto/Services/Mobile/Mobile_api.swift deleted file mode 100644 index 1675e81df9..0000000000 --- a/Sources/Soto/Services/Mobile/Mobile_api.swift +++ /dev/null @@ -1,264 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// This source file is part of the Soto for AWS open source project -// -// Copyright (c) 2017-2024 the Soto project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of Soto project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. -// DO NOT EDIT. - -@_exported import SotoCore - -/// Service object for interacting with AWS Mobile service. -/// -/// AWS Mobile Service provides mobile app and website developers with capabilities required to configure AWS resources and bootstrap their developer desktop projects with the necessary SDKs, constants, tools and samples to make use of those resources. -public struct Mobile: AWSService { - // MARK: Member variables - - /// Client used for communication with AWS - public let client: AWSClient - /// Service configuration - public let config: AWSServiceConfig - - // MARK: Initialization - - /// Initialize the Mobile client - /// - parameters: - /// - client: AWSClient used to process requests - /// - region: Region of server you want to communicate with. This will override the partition parameter. 
- /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). - /// - endpoint: Custom endpoint URL to use instead of standard AWS servers - /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded - /// - timeout: Timeout value for HTTP requests - /// - byteBufferAllocator: Allocator for ByteBuffers - /// - options: Service options - public init( - client: AWSClient, - region: SotoCore.Region? = nil, - partition: AWSPartition = .aws, - endpoint: String? = nil, - middleware: AWSMiddlewareProtocol? = nil, - timeout: TimeAmount? = nil, - byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), - options: AWSServiceConfig.Options = [] - ) { - self.client = client - self.config = AWSServiceConfig( - region: region, - partition: region?.partition ?? partition, - serviceName: "Mobile", - serviceIdentifier: "mobile", - signingName: "AWSMobileHubService", - serviceProtocol: .restjson, - apiVersion: "2017-07-01", - endpoint: endpoint, - errorType: MobileErrorType.self, - xmlNamespace: "http://mobile.amazonaws.com", - middleware: middleware, - timeout: timeout, - byteBufferAllocator: byteBufferAllocator, - options: options - ) - } - - - - - - // MARK: API Calls - - /// Creates an AWS Mobile Hub project. - @Sendable - public func createProject(_ input: CreateProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateProjectResult { - return try await self.client.execute( - operation: "CreateProject", - path: "/projects", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Delets a project in AWS Mobile Hub. 
- @Sendable - public func deleteProject(_ input: DeleteProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteProjectResult { - return try await self.client.execute( - operation: "DeleteProject", - path: "/projects/{projectId}", - httpMethod: .DELETE, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Get the bundle details for the requested bundle id. - @Sendable - public func describeBundle(_ input: DescribeBundleRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeBundleResult { - return try await self.client.execute( - operation: "DescribeBundle", - path: "/bundles/{bundleId}", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Gets details about a project in AWS Mobile Hub. - @Sendable - public func describeProject(_ input: DescribeProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeProjectResult { - return try await self.client.execute( - operation: "DescribeProject", - path: "/project", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Generates customized software development kit (SDK) and or tool packages used to integrate mobile web or mobile app clients with backend AWS resources. - @Sendable - public func exportBundle(_ input: ExportBundleRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ExportBundleResult { - return try await self.client.execute( - operation: "ExportBundle", - path: "/bundles/{bundleId}", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Exports project configuration to a snapshot which can be downloaded and shared. Note that mobile app push credentials are encrypted in exported projects, so they can only be shared successfully within the same AWS account. 
- @Sendable - public func exportProject(_ input: ExportProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ExportProjectResult { - return try await self.client.execute( - operation: "ExportProject", - path: "/exports/{projectId}", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// List all available bundles. - @Sendable - public func listBundles(_ input: ListBundlesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListBundlesResult { - return try await self.client.execute( - operation: "ListBundles", - path: "/bundles", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Lists projects in AWS Mobile Hub. - @Sendable - public func listProjects(_ input: ListProjectsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListProjectsResult { - return try await self.client.execute( - operation: "ListProjects", - path: "/projects", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Update an existing project. - @Sendable - public func updateProject(_ input: UpdateProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateProjectResult { - return try await self.client.execute( - operation: "UpdateProject", - path: "/update", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } -} - -extension Mobile { - /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are not public - /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. 
- public init(from: Mobile, patch: AWSServiceConfig.Patch) { - self.client = from.client - self.config = from.config.with(patch: patch) - } -} - -// MARK: Paginators - -@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) -extension Mobile { - /// List all available bundles. - /// Return PaginatorSequence for operation. - /// - /// - Parameters: - /// - input: Input for request - /// - logger: Logger used flot logging - public func listBundlesPaginator( - _ input: ListBundlesRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listBundles, - inputKey: \ListBundlesRequest.nextToken, - outputKey: \ListBundlesResult.nextToken, - logger: logger - ) - } - - /// Lists projects in AWS Mobile Hub. - /// Return PaginatorSequence for operation. - /// - /// - Parameters: - /// - input: Input for request - /// - logger: Logger used flot logging - public func listProjectsPaginator( - _ input: ListProjectsRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listProjects, - inputKey: \ListProjectsRequest.nextToken, - outputKey: \ListProjectsResult.nextToken, - logger: logger - ) - } -} - -extension Mobile.ListBundlesRequest: AWSPaginateToken { - public func usingPaginationToken(_ token: String) -> Mobile.ListBundlesRequest { - return .init( - maxResults: self.maxResults, - nextToken: token - ) - } -} - -extension Mobile.ListProjectsRequest: AWSPaginateToken { - public func usingPaginationToken(_ token: String) -> Mobile.ListProjectsRequest { - return .init( - maxResults: self.maxResults, - nextToken: token - ) - } -} diff --git a/Sources/Soto/Services/Mobile/Mobile_shapes.swift b/Sources/Soto/Services/Mobile/Mobile_shapes.swift deleted file mode 100644 index 857690a7ad..0000000000 --- a/Sources/Soto/Services/Mobile/Mobile_shapes.swift +++ /dev/null @@ -1,538 +0,0 @@ 
-//===----------------------------------------------------------------------===// -// -// This source file is part of the Soto for AWS open source project -// -// Copyright (c) 2017-2024 the Soto project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of Soto project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. -// DO NOT EDIT. - -#if os(Linux) && compiler(<5.10) -// swift-corelibs-foundation hasn't been updated with Sendable conformances -@preconcurrency import Foundation -#else -import Foundation -#endif -@_spi(SotoInternal) import SotoCore - -extension Mobile { - // MARK: Enums - - public enum Platform: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case android = "ANDROID" - case javascript = "JAVASCRIPT" - case linux = "LINUX" - case objc = "OBJC" - case osx = "OSX" - case swift = "SWIFT" - case windows = "WINDOWS" - public var description: String { return self.rawValue } - } - - public enum ProjectState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case importing = "IMPORTING" - case normal = "NORMAL" - case syncing = "SYNCING" - public var description: String { return self.rawValue } - } - - // MARK: Shapes - - public struct BundleDetails: AWSDecodableShape { - public let availablePlatforms: [Platform]? - public let bundleId: String? - public let description: String? - public let iconUrl: String? - public let title: String? - public let version: String? - - public init(availablePlatforms: [Platform]? = nil, bundleId: String? = nil, description: String? = nil, iconUrl: String? = nil, title: String? = nil, version: String? 
= nil) { - self.availablePlatforms = availablePlatforms - self.bundleId = bundleId - self.description = description - self.iconUrl = iconUrl - self.title = title - self.version = version - } - - private enum CodingKeys: String, CodingKey { - case availablePlatforms = "availablePlatforms" - case bundleId = "bundleId" - case description = "description" - case iconUrl = "iconUrl" - case title = "title" - case version = "version" - } - } - - public struct CreateProjectRequest: AWSEncodableShape { - /// ZIP or YAML file which contains configuration settings to be used when creating the project. This may be the contents of the file downloaded from the URL provided in an export project operation. - public let contents: AWSHTTPBody? - /// Name of the project. - public let name: String? - /// Default region where project resources should be created. - public let region: String? - /// Unique identifier for an exported snapshot of project configuration. This snapshot identifier is included in the share URL when a project is exported. - public let snapshotId: String? - - public init(contents: AWSHTTPBody? = nil, name: String? = nil, region: String? = nil, snapshotId: String? = nil) { - self.contents = contents - self.name = name - self.region = region - self.snapshotId = snapshotId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - var container = encoder.singleValueContainer() - try container.encode(self.contents) - request.encodeQuery(self.name, key: "name") - request.encodeQuery(self.region, key: "region") - request.encodeQuery(self.snapshotId, key: "snapshotId") - } - - private enum CodingKeys: CodingKey {} - } - - public struct CreateProjectResult: AWSDecodableShape { - /// Detailed information about the created AWS Mobile Hub project. - public let details: ProjectDetails? - - public init(details: ProjectDetails? 
= nil) { - self.details = details - } - - private enum CodingKeys: String, CodingKey { - case details = "details" - } - } - - public struct DeleteProjectRequest: AWSEncodableShape { - /// Unique project identifier. - public let projectId: String - - public init(projectId: String) { - self.projectId = projectId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.projectId, key: "projectId") - } - - private enum CodingKeys: CodingKey {} - } - - public struct DeleteProjectResult: AWSDecodableShape { - /// Resources which were deleted. - public let deletedResources: [Resource]? - /// Resources which were not deleted, due to a risk of losing potentially important data or files. - public let orphanedResources: [Resource]? - - public init(deletedResources: [Resource]? = nil, orphanedResources: [Resource]? = nil) { - self.deletedResources = deletedResources - self.orphanedResources = orphanedResources - } - - private enum CodingKeys: String, CodingKey { - case deletedResources = "deletedResources" - case orphanedResources = "orphanedResources" - } - } - - public struct DescribeBundleRequest: AWSEncodableShape { - /// Unique bundle identifier. - public let bundleId: String - - public init(bundleId: String) { - self.bundleId = bundleId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.bundleId, key: "bundleId") - } - - private enum CodingKeys: CodingKey {} - } - - public struct DescribeBundleResult: AWSDecodableShape { - /// The details of the bundle. - public let details: BundleDetails? - - public init(details: BundleDetails? 
= nil) { - self.details = details - } - - private enum CodingKeys: String, CodingKey { - case details = "details" - } - } - - public struct DescribeProjectRequest: AWSEncodableShape { - /// Unique project identifier. - public let projectId: String - /// If set to true, causes AWS Mobile Hub to synchronize information from other services, e.g., update state of AWS CloudFormation stacks in the AWS Mobile Hub project. - public let syncFromResources: Bool? - - public init(projectId: String, syncFromResources: Bool? = nil) { - self.projectId = projectId - self.syncFromResources = syncFromResources - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodeQuery(self.projectId, key: "projectId") - request.encodeQuery(self.syncFromResources, key: "syncFromResources") - } - - private enum CodingKeys: CodingKey {} - } - - public struct DescribeProjectResult: AWSDecodableShape { - public let details: ProjectDetails? - - public init(details: ProjectDetails? = nil) { - self.details = details - } - - private enum CodingKeys: String, CodingKey { - case details = "details" - } - } - - public struct ExportBundleRequest: AWSEncodableShape { - /// Unique bundle identifier. - public let bundleId: String - /// Developer desktop or target application platform. - public let platform: Platform? - /// Unique project identifier. - public let projectId: String? - - public init(bundleId: String, platform: Platform? = nil, projectId: String? = nil) { - self.bundleId = bundleId - self.platform = platform - self.projectId = projectId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.bundleId, key: "bundleId") - request.encodeQuery(self.platform, key: "platform") - request.encodeQuery(self.projectId, key: "projectId") - } - - private enum CodingKeys: CodingKey {} - } - - public struct ExportBundleResult: AWSDecodableShape { - /// URL which contains the custom-generated SDK and tool packages used to integrate the client mobile app or web app with the AWS resources created by the AWS Mobile Hub project. - public let downloadUrl: String? - - public init(downloadUrl: String? = nil) { - self.downloadUrl = downloadUrl - } - - private enum CodingKeys: String, CodingKey { - case downloadUrl = "downloadUrl" - } - } - - public struct ExportProjectRequest: AWSEncodableShape { - /// Unique project identifier. - public let projectId: String - - public init(projectId: String) { - self.projectId = projectId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.projectId, key: "projectId") - } - - private enum CodingKeys: CodingKey {} - } - - public struct ExportProjectResult: AWSDecodableShape { - /// URL which can be used to download the exported project configuation file(s). - public let downloadUrl: String? - /// URL which can be shared to allow other AWS users to create their own project in AWS Mobile Hub with the same configuration as the specified project. This URL pertains to a snapshot in time of the project configuration that is created when this API is called. If you want to share additional changes to your project configuration, then you will need to create and share a new snapshot by calling this method again. - public let shareUrl: String? - /// Unique identifier for the exported snapshot of the project configuration. This snapshot identifier is included in the share URL. 
- public let snapshotId: String? - - public init(downloadUrl: String? = nil, shareUrl: String? = nil, snapshotId: String? = nil) { - self.downloadUrl = downloadUrl - self.shareUrl = shareUrl - self.snapshotId = snapshotId - } - - private enum CodingKeys: String, CodingKey { - case downloadUrl = "downloadUrl" - case shareUrl = "shareUrl" - case snapshotId = "snapshotId" - } - } - - public struct ListBundlesRequest: AWSEncodableShape { - /// Maximum number of records to list in a single response. - public let maxResults: Int? - /// Pagination token. Set to null to start listing bundles from start. If non-null pagination token is returned in a result, then pass its value in here in another request to list more bundles. - public let nextToken: String? - - public init(maxResults: Int? = nil, nextToken: String? = nil) { - self.maxResults = maxResults - self.nextToken = nextToken - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodeQuery(self.maxResults, key: "maxResults") - request.encodeQuery(self.nextToken, key: "nextToken") - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListBundlesResult: AWSDecodableShape { - /// A list of bundles. - public let bundleList: [BundleDetails]? - /// Pagination token. If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries. - public let nextToken: String? - - public init(bundleList: [BundleDetails]? = nil, nextToken: String? = nil) { - self.bundleList = bundleList - self.nextToken = nextToken - } - - private enum CodingKeys: String, CodingKey { - case bundleList = "bundleList" - case nextToken = "nextToken" - } - } - - public struct ListProjectsRequest: AWSEncodableShape { - /// Maximum number of records to list in a single response. - public let maxResults: Int? - /// Pagination token. 
Set to null to start listing projects from start. If non-null pagination token is returned in a result, then pass its value in here in another request to list more projects. - public let nextToken: String? - - public init(maxResults: Int? = nil, nextToken: String? = nil) { - self.maxResults = maxResults - self.nextToken = nextToken - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodeQuery(self.maxResults, key: "maxResults") - request.encodeQuery(self.nextToken, key: "nextToken") - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListProjectsResult: AWSDecodableShape { - public let nextToken: String? - public let projects: [ProjectSummary]? - - public init(nextToken: String? = nil, projects: [ProjectSummary]? = nil) { - self.nextToken = nextToken - self.projects = projects - } - - private enum CodingKeys: String, CodingKey { - case nextToken = "nextToken" - case projects = "projects" - } - } - - public struct ProjectDetails: AWSDecodableShape { - /// Website URL for this project in the AWS Mobile Hub console. - public let consoleUrl: String? - /// Date the project was created. - public let createdDate: Date? - /// Date of the last modification of the project. - public let lastUpdatedDate: Date? - public let name: String? - public let projectId: String? - public let region: String? - public let resources: [Resource]? - public let state: ProjectState? - - public init(consoleUrl: String? = nil, createdDate: Date? = nil, lastUpdatedDate: Date? = nil, name: String? = nil, projectId: String? = nil, region: String? = nil, resources: [Resource]? = nil, state: ProjectState? 
= nil) { - self.consoleUrl = consoleUrl - self.createdDate = createdDate - self.lastUpdatedDate = lastUpdatedDate - self.name = name - self.projectId = projectId - self.region = region - self.resources = resources - self.state = state - } - - private enum CodingKeys: String, CodingKey { - case consoleUrl = "consoleUrl" - case createdDate = "createdDate" - case lastUpdatedDate = "lastUpdatedDate" - case name = "name" - case projectId = "projectId" - case region = "region" - case resources = "resources" - case state = "state" - } - } - - public struct ProjectSummary: AWSDecodableShape { - /// Name of the project. - public let name: String? - /// Unique project identifier. - public let projectId: String? - - public init(name: String? = nil, projectId: String? = nil) { - self.name = name - self.projectId = projectId - } - - private enum CodingKeys: String, CodingKey { - case name = "name" - case projectId = "projectId" - } - } - - public struct Resource: AWSDecodableShape { - public let arn: String? - public let attributes: [String: String]? - public let feature: String? - public let name: String? - public let type: String? - - public init(arn: String? = nil, attributes: [String: String]? = nil, feature: String? = nil, name: String? = nil, type: String? = nil) { - self.arn = arn - self.attributes = attributes - self.feature = feature - self.name = name - self.type = type - } - - private enum CodingKeys: String, CodingKey { - case arn = "arn" - case attributes = "attributes" - case feature = "feature" - case name = "name" - case type = "type" - } - } - - public struct UpdateProjectRequest: AWSEncodableShape { - /// ZIP or YAML file which contains project configuration to be updated. This should be the contents of the file downloaded from the URL provided in an export project operation. - public let contents: AWSHTTPBody? - /// Unique project identifier. - public let projectId: String - - public init(contents: AWSHTTPBody? 
= nil, projectId: String) { - self.contents = contents - self.projectId = projectId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - var container = encoder.singleValueContainer() - try container.encode(self.contents) - request.encodeQuery(self.projectId, key: "projectId") - } - - private enum CodingKeys: CodingKey {} - } - - public struct UpdateProjectResult: AWSDecodableShape { - /// Detailed information about the updated AWS Mobile Hub project. - public let details: ProjectDetails? - - public init(details: ProjectDetails? = nil) { - self.details = details - } - - private enum CodingKeys: String, CodingKey { - case details = "details" - } - } -} - -// MARK: - Errors - -/// Error enum for Mobile -public struct MobileErrorType: AWSErrorType { - enum Code: String { - case accountActionRequiredException = "AccountActionRequiredException" - case badRequestException = "BadRequestException" - case internalFailureException = "InternalFailureException" - case limitExceededException = "LimitExceededException" - case notFoundException = "NotFoundException" - case serviceUnavailableException = "ServiceUnavailableException" - case tooManyRequestsException = "TooManyRequestsException" - case unauthorizedException = "UnauthorizedException" - } - - private let error: Code - public let context: AWSErrorContext? - - /// initialize Mobile - public init?(errorCode: String, context: AWSErrorContext) { - guard let error = Code(rawValue: errorCode) else { return nil } - self.error = error - self.context = context - } - - internal init(_ error: Code) { - self.error = error - self.context = nil - } - - /// return error code string - public var errorCode: String { self.error.rawValue } - - /// Account Action is required in order to continue the request. 
- public static var accountActionRequiredException: Self { .init(.accountActionRequiredException) } - /// The request cannot be processed because some parameter is not valid or the project state prevents the operation from being performed. - public static var badRequestException: Self { .init(.badRequestException) } - /// The service has encountered an unexpected error condition which prevents it from servicing the request. - public static var internalFailureException: Self { .init(.internalFailureException) } - /// There are too many AWS Mobile Hub projects in the account or the account has exceeded the maximum number of resources in some AWS service. You should create another sub-account using AWS Organizations or remove some resources and retry your request. - public static var limitExceededException: Self { .init(.limitExceededException) } - /// No entity can be found with the specified identifier. - public static var notFoundException: Self { .init(.notFoundException) } - /// The service is temporarily unavailable. The request should be retried after some time delay. - public static var serviceUnavailableException: Self { .init(.serviceUnavailableException) } - /// Too many requests have been received for this AWS account in too short a time. The request should be retried after some time delay. - public static var tooManyRequestsException: Self { .init(.tooManyRequestsException) } - /// Credentials of the caller are insufficient to authorize the request. - public static var unauthorizedException: Self { .init(.unauthorizedException) } -} - -extension MobileErrorType: Equatable { - public static func == (lhs: MobileErrorType, rhs: MobileErrorType) -> Bool { - lhs.error == rhs.error - } -} - -extension MobileErrorType: CustomStringConvertible { - public var description: String { - return "\(self.error.rawValue): \(self.message ?? 
"")" - } -} diff --git a/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_shapes.swift b/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_shapes.swift index 230f066904..4e4fb51d23 100644 --- a/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_shapes.swift +++ b/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_shapes.swift @@ -26,6 +26,11 @@ import Foundation extension NeptuneGraph { // MARK: Enums + public enum BlankNodeHandling: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case convertToIri = "convertToIri" + public var description: String { return self.rawValue } + } + public enum ExplainMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case `static` = "STATIC" case details = "DETAILS" @@ -34,6 +39,7 @@ extension NeptuneGraph { public enum Format: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case csv = "CSV" + case ntriples = "NTRIPLES" case openCypher = "OPEN_CYPHER" public var description: String { return self.rawValue } } @@ -232,7 +238,7 @@ extension NeptuneGraph { try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, min: 1) try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, max: 24576) - try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 128) + try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 32) try self.validate(self.replicaCount, name: "replicaCount", parent: name, max: 2) try self.validate(self.replicaCount, name: "replicaCount", parent: name, min: 0) try self.tags?.forEach { @@ -399,6 +405,8 @@ extension NeptuneGraph { } public struct CreateGraphUsingImportTaskInput: AWSEncodableShape { + /// The method to handle blank nodes in the dataset. 
Currently, only convertToIri is supported, meaning blank nodes are converted to unique IRIs at load time. Must be provided when format is ntriples. For more information, see Handling RDF values. + public let blankNodeHandling: BlankNodeHandling? /// Indicates whether or not to enable deletion protection on the graph. The graph can’t be deleted when deletion protection is enabled. (true or false). public let deletionProtection: Bool? /// If set to true, the task halts when an import error is encountered. If set to false, the task skips the data that caused the error and continues if possible. @@ -428,7 +436,8 @@ extension NeptuneGraph { /// Specifies the number of dimensions for vector embeddings that will be loaded into the graph. The value is specified as dimension=value. Max = 65,535 public let vectorSearchConfiguration: VectorSearchConfiguration? - public init(deletionProtection: Bool? = nil, failOnError: Bool? = nil, format: Format? = nil, graphName: String, importOptions: ImportOptions? = nil, kmsKeyIdentifier: String? = nil, maxProvisionedMemory: Int? = nil, minProvisionedMemory: Int? = nil, publicConnectivity: Bool? = nil, replicaCount: Int? = nil, roleArn: String, source: String, tags: [String: String]? = nil, vectorSearchConfiguration: VectorSearchConfiguration? = nil) { + public init(blankNodeHandling: BlankNodeHandling? = nil, deletionProtection: Bool? = nil, failOnError: Bool? = nil, format: Format? = nil, graphName: String, importOptions: ImportOptions? = nil, kmsKeyIdentifier: String? = nil, maxProvisionedMemory: Int? = nil, minProvisionedMemory: Int? = nil, publicConnectivity: Bool? = nil, replicaCount: Int? = nil, roleArn: String, source: String, tags: [String: String]? = nil, vectorSearchConfiguration: VectorSearchConfiguration? 
= nil) { + self.blankNodeHandling = blankNodeHandling self.deletionProtection = deletionProtection self.failOnError = failOnError self.format = format @@ -453,9 +462,9 @@ extension NeptuneGraph { try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, min: 1) try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") try self.validate(self.maxProvisionedMemory, name: "maxProvisionedMemory", parent: name, max: 24576) - try self.validate(self.maxProvisionedMemory, name: "maxProvisionedMemory", parent: name, min: 128) + try self.validate(self.maxProvisionedMemory, name: "maxProvisionedMemory", parent: name, min: 32) try self.validate(self.minProvisionedMemory, name: "minProvisionedMemory", parent: name, max: 24576) - try self.validate(self.minProvisionedMemory, name: "minProvisionedMemory", parent: name, min: 128) + try self.validate(self.minProvisionedMemory, name: "minProvisionedMemory", parent: name, min: 32) try self.validate(self.replicaCount, name: "replicaCount", parent: name, max: 2) try self.validate(self.replicaCount, name: "replicaCount", parent: name, min: 0) try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$") @@ -470,6 +479,7 @@ extension NeptuneGraph { } private enum CodingKeys: String, CodingKey { + case blankNodeHandling = "blankNodeHandling" case deletionProtection = "deletionProtection" case failOnError = "failOnError" case format = "format" @@ -488,7 +498,7 @@ extension NeptuneGraph { } public struct CreateGraphUsingImportTaskOutput: AWSDecodableShape { - /// Specifies the format of S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format or OPENCYPHER, which identies the openCypher load format. + /// Specifies the format of S3 data to be imported. 
Valid values are CSV, which identifies the Gremlin CSV format, OPENCYPHER, which identifies the openCypher load format, or ntriples, which identifies the RDF n-triples format. public let format: Format? /// The unique identifier of the Neptune Analytics graph. public let graphId: String? @@ -1988,7 +1998,7 @@ extension NeptuneGraph { try self.validate(self.graphName, name: "graphName", parent: name, min: 1) try self.validate(self.graphName, name: "graphName", parent: name, pattern: "^(?!g-)[a-z][a-z0-9]*(-[a-z0-9]+)*$") try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, max: 24576) - try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 128) + try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 32) try self.validate(self.replicaCount, name: "replicaCount", parent: name, max: 2) try self.validate(self.replicaCount, name: "replicaCount", parent: name, min: 0) try self.validate(self.snapshotIdentifier, name: "snapshotIdentifier", parent: name, pattern: "^gs-[a-z0-9]{10}$") @@ -2080,6 +2090,8 @@ extension NeptuneGraph { } public struct StartImportTaskInput: AWSEncodableShape { + /// The method to handle blank nodes in the dataset. Currently, only convertToIri is supported, meaning blank nodes are converted to unique IRIs at load time. Must be provided when format is ntriples. For more information, see Handling RDF values. + public let blankNodeHandling: BlankNodeHandling? /// If set to true, the task halts when an import error is encountered. If set to false, the task skips the data that caused the error and continues if possible. public let failOnError: Bool? /// Specifies the format of Amazon S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format or OPENCYPHER, which identies the openCypher load format. @@ -2092,7 +2104,8 @@ extension NeptuneGraph { /// A URL identifying the location of the data to be imported. 
This can be an Amazon S3 path, or can point to a Neptune database endpoint or snapshot. public let source: String - public init(failOnError: Bool? = nil, format: Format? = nil, graphIdentifier: String, importOptions: ImportOptions? = nil, roleArn: String, source: String) { + public init(blankNodeHandling: BlankNodeHandling? = nil, failOnError: Bool? = nil, format: Format? = nil, graphIdentifier: String, importOptions: ImportOptions? = nil, roleArn: String, source: String) { + self.blankNodeHandling = blankNodeHandling self.failOnError = failOnError self.format = format self.graphIdentifier = graphIdentifier @@ -2104,6 +2117,7 @@ extension NeptuneGraph { public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.blankNodeHandling, forKey: .blankNodeHandling) try container.encodeIfPresent(self.failOnError, forKey: .failOnError) try container.encodeIfPresent(self.format, forKey: .format) request.encodePath(self.graphIdentifier, key: "graphIdentifier") @@ -2118,6 +2132,7 @@ extension NeptuneGraph { } private enum CodingKeys: String, CodingKey { + case blankNodeHandling = "blankNodeHandling" case failOnError = "failOnError" case format = "format" case importOptions = "importOptions" @@ -2268,7 +2283,7 @@ extension NeptuneGraph { public func validate(name: String) throws { try self.validate(self.graphIdentifier, name: "graphIdentifier", parent: name, pattern: "^g-[a-z0-9]{10}$") try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, max: 24576) - try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 128) + try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 32) } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift 
b/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift index 736d111db0..4c193a7b0b 100644 --- a/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift +++ b/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift @@ -152,7 +152,7 @@ public struct NetworkFirewall: AWSService { ) } - /// Creates an Network Firewall TLS inspection configuration. A TLS inspection configuration contains Certificate Manager certificate associations between and the scope configurations that Network Firewall uses to decrypt and re-encrypt traffic traveling through your firewall. After you create a TLS inspection configuration, you can associate it with a new firewall policy. To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration. To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource. To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration. For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS + /// Creates an Network Firewall TLS inspection configuration. Network Firewall uses TLS inspection configurations to decrypt your firewall's inbound and outbound SSL/TLS traffic. After decryption, Network Firewall inspects the traffic according to your firewall policy's stateful rules, and then re-encrypts it before sending it to its destination. You can enable inspection of your firewall's inbound traffic, outbound traffic, or both. To use TLS inspection with your firewall, you must first import or provision certificates using ACM, create a TLS inspection configuration, add that configuration to a new firewall policy, and then associate that policy with your firewall. To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration. 
To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource. To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration. For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS /// inspection configurations in the Network Firewall Developer Guide. @Sendable public func createTLSInspectionConfiguration(_ input: CreateTLSInspectionConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTLSInspectionConfigurationResponse { diff --git a/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_shapes.swift b/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_shapes.swift index 6d4bcf5187..c1b9d2534f 100644 --- a/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_shapes.swift +++ b/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_shapes.swift @@ -85,6 +85,7 @@ extension NetworkFirewall { public enum LogType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case alert = "ALERT" case flow = "FLOW" + case tls = "TLS" public var description: String { return self.rawValue } } @@ -1991,11 +1992,11 @@ extension NetworkFirewall { } public struct LogDestinationConfig: AWSEncodableShape & AWSDecodableShape { - /// The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type. For an Amazon S3 bucket, provide the name of the bucket, with key bucketName, and optionally provide a prefix, with key prefix. The following example specifies an Amazon S3 bucket named DOC-EXAMPLE-BUCKET and the prefix alerts: "LogDestination": { "bucketName": "DOC-EXAMPLE-BUCKET", "prefix": "alerts" } For a CloudWatch log group, provide the name of the CloudWatch log group, with key logGroup. 
The following example specifies a log group named alert-log-group: "LogDestination": { "logGroup": "alert-log-group" } For a Kinesis Data Firehose delivery stream, provide the name of the delivery stream, with key deliveryStream. The following example specifies a delivery stream named alert-delivery-stream: "LogDestination": { "deliveryStream": "alert-delivery-stream" } + /// The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type. For an Amazon S3 bucket, provide the name of the bucket, with key bucketName, and optionally provide a prefix, with key prefix. The following example specifies an Amazon S3 bucket named DOC-EXAMPLE-BUCKET and the prefix alerts: "LogDestination": { "bucketName": "DOC-EXAMPLE-BUCKET", "prefix": "alerts" } For a CloudWatch log group, provide the name of the CloudWatch log group, with key logGroup. The following example specifies a log group named alert-log-group: "LogDestination": { "logGroup": "alert-log-group" } For a Firehose delivery stream, provide the name of the delivery stream, with key deliveryStream. The following example specifies a delivery stream named alert-delivery-stream: "LogDestination": { "deliveryStream": "alert-delivery-stream" } public let logDestination: [String: String] - /// The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Kinesis Data Firehose delivery stream. + /// The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream. public let logDestinationType: LogDestinationType - /// The type of log to send. Alert logs report traffic that matches a StatefulRule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs. + /// The type of log to record. You can record the following types of logs from your Network Firewall stateful engine. 
ALERT - Logs for traffic that matches your stateful rules and that have an action that sends an alert. A stateful rule sends alerts for the rule actions DROP, ALERT, and REJECT. For more information, see StatefulRule. FLOW - Standard network traffic flow logs. The stateful rules engine records flow logs for all network traffic that it receives. Each flow log record captures the network flow for a specific standard stateless rule group. TLS - Logs for events that are related to TLS inspection. For more information, see Inspecting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide. public let logType: LogType public init(logDestination: [String: String], logDestinationType: LogDestinationType, logType: LogType) { @@ -2655,7 +2656,7 @@ extension NetworkFirewall { } public struct StatefulRule: AWSEncodableShape & AWSDecodableShape { - /// Defines what Network Firewall should do with the packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow. The actions for a stateful rule are defined as follows: PASS - Permits the packets to go to the intended destination. DROP - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration. ALERT - Sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration. You can use this action to test a rule that you intend to use to drop traffic. You can enable the rule with ALERT action, verify in the logs that the rule is filtering as you want, then change the action to DROP. + /// Defines what Network Firewall should do with the packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow. 
The actions for a stateful rule are defined as follows: PASS - Permits the packets to go to the intended destination. DROP - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration. ALERT - Sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration. You can use this action to test a rule that you intend to use to drop traffic. You can enable the rule with ALERT action, verify in the logs that the rule is filtering as you want, then change the action to DROP. REJECT - Drops traffic that matches the conditions of the stateful rule, and sends a TCP reset packet back to sender of the packet. A TCP reset packet is a packet with no payload and an RST bit contained in the TCP header flags. REJECT is available only for TCP traffic. This option doesn't support FTP or IMAP protocols. public let action: StatefulAction /// The stateful inspection criteria for this rule, used to inspect traffic flows. public let header: Header diff --git a/Sources/Soto/Services/Outposts/Outposts_shapes.swift b/Sources/Soto/Services/Outposts/Outposts_shapes.swift index eec76b519c..b272c0501a 100644 --- a/Sources/Soto/Services/Outposts/Outposts_shapes.swift +++ b/Sources/Soto/Services/Outposts/Outposts_shapes.swift @@ -1273,13 +1273,17 @@ extension Outposts { public struct InstanceTypeItem: AWSDecodableShape { public let instanceType: String? + /// The number of default VCPUs in an instance type. + public let vcpUs: Int? - public init(instanceType: String? = nil) { + public init(instanceType: String? = nil, vcpUs: Int? 
= nil) { self.instanceType = instanceType + self.vcpUs = vcpUs } private enum CodingKeys: String, CodingKey { case instanceType = "InstanceType" + case vcpUs = "VCPUs" } } diff --git a/Sources/Soto/Services/PI/PI_shapes.swift b/Sources/Soto/Services/PI/PI_shapes.swift index 2a849f9acc..9265eeeebd 100644 --- a/Sources/Soto/Services/PI/PI_shapes.swift +++ b/Sources/Soto/Services/PI/PI_shapes.swift @@ -409,7 +409,7 @@ extension PI { } public struct DimensionGroup: AWSEncodableShape { - /// A list of specific dimensions from a dimension group. If this parameter is not present, then it signifies that all of the dimensions in the group were requested, or are present in the response. Valid values for elements in the Dimensions array are: db.application.name - The name of the application that is connected to the database. Valid values are as follows: Aurora PostgreSQL Amazon RDS PostgreSQL Amazon DocumentDB db.host.id - The host ID of the connected client (all engines). db.host.name - The host name of the connected client (all engines). db.name - The name of the database to which the client is connected. Valid values are as follows: Aurora PostgreSQL Amazon RDS PostgreSQL Aurora MySQL Amazon RDS MySQL Amazon RDS MariaDB Amazon DocumentDB db.query.id - The query ID generated by Performance Insights (only Amazon DocumentDB). db.query.db_id - The query ID generated by the database (only Amazon DocumentDB). db.query.statement - The text of the query that is being run (only Amazon DocumentDB). db.query.tokenized_id db.query.tokenized.id - The query digest ID generated by Performance Insights (only Amazon DocumentDB). db.query.tokenized.db_id - The query digest ID generated by Performance Insights (only Amazon DocumentDB). db.query.tokenized.statement - The text of the query digest (only Amazon DocumentDB). db.session_type.name - The type of the current session (only Amazon DocumentDB). 
db.sql.id - The hash of the full, non-tokenized SQL statement generated by Performance Insights (all engines except Amazon DocumentDB). db.sql.db_id - Either the SQL ID generated by the database engine, or a value generated by Performance Insights that begins with pi- (all engines except Amazon DocumentDB). db.sql.statement - The full text of the SQL statement that is running, as in SELECT * FROM employees (all engines except Amazon DocumentDB) db.sql.tokenized_id db.sql_tokenized.id - The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). In the console, db.sql_tokenized.id is called the Support ID because Amazon Web Services Support can look at this data to help you troubleshoot database issues. db.sql_tokenized.db_id - Either the native database ID used to refer to the SQL statement, or a synthetic ID such as pi-2372568224 that Performance Insights generates if the native database ID isn't available (all engines except Amazon DocumentDB). db.sql_tokenized.statement - The text of the SQL digest, as in SELECT * FROM employees WHERE employee_id = ? (all engines except Amazon DocumentDB) db.user.id - The ID of the user logged in to the database (all engines except Amazon DocumentDB). db.user.name - The name of the user logged in to the database (all engines except Amazon DocumentDB). db.wait_event.name - The event for which the backend is waiting (all engines except Amazon DocumentDB). db.wait_event.type - The type of event for which the backend is waiting (all engines except Amazon DocumentDB). db.wait_event_type.name - The name of the event type for which the backend is waiting (all engines except Amazon DocumentDB). db.wait_state.name - The event for which the backend is waiting (only Amazon DocumentDB). + /// A list of specific dimensions from a dimension group. If this parameter is not present, then it signifies that all of the dimensions in the group were requested, or are present in the response. 
Valid values for elements in the Dimensions array are: db.application.name - The name of the application that is connected to the database. Valid values are as follows: Aurora PostgreSQL Amazon RDS PostgreSQL Amazon DocumentDB db.host.id - The host ID of the connected client (all engines). db.host.name - The host name of the connected client (all engines). db.name - The name of the database to which the client is connected. Valid values are as follows: Aurora PostgreSQL Amazon RDS PostgreSQL Aurora MySQL Amazon RDS MySQL Amazon RDS MariaDB Amazon DocumentDB db.query.id - The query ID generated by Performance Insights (only Amazon DocumentDB). db.query.db_id - The query ID generated by the database (only Amazon DocumentDB). db.query.statement - The text of the query that is being run (only Amazon DocumentDB). db.query.tokenized_id db.query.tokenized.id - The query digest ID generated by Performance Insights (only Amazon DocumentDB). db.query.tokenized.db_id - The query digest ID generated by Performance Insights (only Amazon DocumentDB). db.query.tokenized.statement - The text of the query digest (only Amazon DocumentDB). db.session_type.name - The type of the current session (only Amazon DocumentDB). db.sql.id - The hash of the full, non-tokenized SQL statement generated by Performance Insights (all engines except Amazon DocumentDB). db.sql.db_id - Either the SQL ID generated by the database engine, or a value generated by Performance Insights that begins with pi- (all engines except Amazon DocumentDB). db.sql.statement - The full text of the SQL statement that is running, as in SELECT * FROM employees (all engines except Amazon DocumentDB) db.sql.tokenized_id - The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). The db.sql.tokenized_id dimension fetches the value of the db.sql_tokenized.id dimension. Amazon RDS returns db.sql.tokenized_id from the db.sql dimension group. 
db.sql_tokenized.id - The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). In the console, db.sql_tokenized.id is called the Support ID because Amazon Web Services Support can look at this data to help you troubleshoot database issues. db.sql_tokenized.db_id - Either the native database ID used to refer to the SQL statement, or a synthetic ID such as pi-2372568224 that Performance Insights generates if the native database ID isn't available (all engines except Amazon DocumentDB). db.sql_tokenized.statement - The text of the SQL digest, as in SELECT * FROM employees WHERE employee_id = ? (all engines except Amazon DocumentDB) db.user.id - The ID of the user logged in to the database (all engines except Amazon DocumentDB). db.user.name - The name of the user logged in to the database (all engines except Amazon DocumentDB). db.wait_event.name - The event for which the backend is waiting (all engines except Amazon DocumentDB). db.wait_event.type - The type of event for which the backend is waiting (all engines except Amazon DocumentDB). db.wait_event_type.name - The name of the event type for which the backend is waiting (all engines except Amazon DocumentDB). db.wait_state.name - The event for which the backend is waiting (only Amazon DocumentDB). public let dimensions: [String]? /// The name of the dimension group. Valid values are as follows: db - The name of the database to which the client is connected. The following values are permitted: Aurora PostgreSQL Amazon RDS PostgreSQL Aurora MySQL Amazon RDS MySQL Amazon RDS MariaDB Amazon DocumentDB db.application - The name of the application that is connected to the database. The following values are permitted: Aurora PostgreSQL Amazon RDS PostgreSQL Amazon DocumentDB db.host - The host name of the connected client (all engines). db.query - The query that is currently running (only Amazon DocumentDB). db.query_tokenized - The digest query (only Amazon DocumentDB). 
db.session_type - The type of the current session (only Aurora PostgreSQL and RDS PostgreSQL). db.sql - The text of the SQL statement that is currently running (all engines except Amazon DocumentDB). db.sql_tokenized - The SQL digest (all engines except Amazon DocumentDB). db.user - The user logged in to the database (all engines except Amazon DocumentDB). db.wait_event - The event for which the database backend is waiting (all engines except Amazon DocumentDB). db.wait_event_type - The type of event for which the database backend is waiting (all engines except Amazon DocumentDB). db.wait_state - The event for which the database backend is waiting (only Amazon DocumentDB). public let group: String diff --git a/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_api.swift b/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_api.swift index 5f62aaf0b3..bd13b9b36d 100644 --- a/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_api.swift +++ b/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS PinpointSMSVoiceV2 service. /// -/// Welcome to the Amazon Pinpoint SMS and Voice, version 2 API Reference. This guide provides information about Amazon Pinpoint SMS and Voice, version 2 API resources, including supported HTTP methods, parameters, and schemas. Amazon Pinpoint is an Amazon Web Services service that you can use to engage with your recipients across multiple messaging channels. The Amazon Pinpoint SMS and Voice, version 2 API provides programmatic access to options that are unique to the SMS and voice channels. Amazon Pinpoint SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API. If you're new to Amazon Pinpoint SMS, it's also helpful to review the Amazon Pinpoint SMS User Guide. 
The Amazon Pinpoint Developer Guide provides tutorials, code samples, and procedures that demonstrate how to use Amazon Pinpoint SMS features programmatically and how to integrate Amazon Pinpoint functionality into mobile apps and other types of applications. The guide also provides key information, such as Amazon Pinpoint integration with other Amazon Web Services services, and the quotas that apply to use of the service. Regional availability The Amazon Pinpoint SMS and Voice, version 2 API Reference is available in several Amazon Web Services Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Pinpoint endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference. In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure. +/// Welcome to the AWS End User Messaging SMS and Voice, version 2 API Reference. This guide provides information about AWS End User Messaging SMS and Voice, version 2 API resources, including supported HTTP methods, parameters, and schemas. Amazon Pinpoint is an Amazon Web Services service that you can use to engage with your recipients across multiple messaging channels. The AWS End User Messaging SMS and Voice, version 2 API provides programmatic access to options that are unique to the SMS and voice channels. 
AWS End User Messaging SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API. If you're new to AWS End User Messaging SMS and Voice, it's also helpful to review the AWS End User Messaging SMS User Guide. The AWS End User Messaging SMS User Guide provides tutorials, code samples, and procedures that demonstrate how to use AWS End User Messaging SMS and Voice features programmatically and how to integrate functionality into mobile apps and other types of applications. The guide also provides key information, such as AWS End User Messaging SMS and Voice integration with other Amazon Web Services services, and the quotas that apply to use of the service. Regional availability The AWS End User Messaging SMS and Voice version 2 API Reference is available in several Amazon Web Services Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Pinpoint endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference. In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure. public struct PinpointSMSVoiceV2: AWSService { // MARK: Member variables @@ -126,7 +126,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Creates a new event destination in a configuration set. 
An event destination is a location where you send message events. The event options are Amazon CloudWatch, Amazon Kinesis Data Firehose, or Amazon SNS. For example, when a message is delivered successfully, you can send information about that event to an event destination, or send notifications to endpoints that are subscribed to an Amazon SNS topic. Each configuration set can contain between 0 and 5 event destinations. Each event destination can contain a reference to a single destination, such as a CloudWatch or Kinesis Data Firehose destination. + /// Creates a new event destination in a configuration set. An event destination is a location where you send message events. The event options are Amazon CloudWatch, Amazon Data Firehose, or Amazon SNS. For example, when a message is delivered successfully, you can send information about that event to an event destination, or send notifications to endpoints that are subscribed to an Amazon SNS topic. Each configuration set can contain between 0 and 5 event destinations. Each event destination can contain a reference to a single destination, such as a CloudWatch or Firehose destination. @Sendable public func createEventDestination(_ input: CreateEventDestinationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEventDestinationResult { return try await self.client.execute( @@ -139,7 +139,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Creates a new opt-out list. If the opt-out list name already exists, an error is returned. An opt-out list is a list of phone numbers that are opted out, meaning you can't send SMS or voice messages to them. If end user replies with the keyword "STOP," an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported opt-out keywords, see SMS opt out in the Amazon Pinpoint User Guide. + /// Creates a new opt-out list. 
If the opt-out list name already exists, an error is returned. An opt-out list is a list of phone numbers that are opted out, meaning you can't send SMS or voice messages to them. If end user replies with the keyword "STOP," an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported opt-out keywords, see SMS opt out in the AWS End User Messaging SMS User Guide. @Sendable public func createOptOutList(_ input: CreateOptOutListRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateOptOutListResult { return try await self.client.execute( @@ -308,7 +308,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Deletes an existing keyword from an origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, Amazon Pinpoint responds with a customizable message. Keywords "HELP" and "STOP" can't be deleted or modified. + /// Deletes an existing keyword from an origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message. Keywords "HELP" and "STOP" can't be deleted or modified. 
@Sendable public func deleteKeyword(_ input: DeleteKeywordRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteKeywordResult { return try await self.client.execute( @@ -425,7 +425,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is controlled by Amazon Web Services. For more information on spend limits (quotas) see Amazon Pinpoint quotas in the Amazon Pinpoint Developer Guide. + /// Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is controlled by Amazon Web Services. For more information on spend limits (quotas) see Quotas in the AWS End User Messaging SMS User Guide. @Sendable public func deleteTextMessageSpendLimitOverride(_ input: DeleteTextMessageSpendLimitOverrideRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTextMessageSpendLimitOverrideResult { return try await self.client.execute( @@ -451,7 +451,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Deletes an account level monthly spend limit override for sending voice messages. Deleting a spend limit override sets the EnforcedLimit equal to the MaxLimit, which is controlled by Amazon Web Services. For more information on spending limits (quotas) see Amazon Pinpoint quotas in the Amazon Pinpoint Developer Guide. + /// Deletes an account level monthly spend limit override for sending voice messages. Deleting a spend limit override sets the EnforcedLimit equal to the MaxLimit, which is controlled by Amazon Web Services. For more information on spending limits (quotas) see Quotas in the AWS End User Messaging SMS User Guide. 
@Sendable public func deleteVoiceMessageSpendLimitOverride(_ input: DeleteVoiceMessageSpendLimitOverrideRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteVoiceMessageSpendLimitOverrideResult { return try await self.client.execute( @@ -464,7 +464,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Describes attributes of your Amazon Web Services account. The supported account attributes include account tier, which indicates whether your account is in the sandbox or production environment. When you're ready to move your account out of the sandbox, create an Amazon Web Services Support case for a service limit increase request. New Amazon Pinpoint accounts are placed into an SMS or voice sandbox. The sandbox protects both Amazon Web Services end recipients and SMS or voice recipients from fraud and abuse. + /// Describes attributes of your Amazon Web Services account. The supported account attributes include account tier, which indicates whether your account is in the sandbox or production environment. When you're ready to move your account out of the sandbox, create an Amazon Web Services Support case for a service limit increase request. New accounts are placed into an SMS or voice sandbox. The sandbox protects both Amazon Web Services end recipients and SMS or voice recipients from fraud and abuse. @Sendable public func describeAccountAttributes(_ input: DescribeAccountAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAccountAttributesResult { return try await self.client.execute( @@ -477,7 +477,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Describes the current Amazon Pinpoint SMS Voice V2 resource quotas for your account. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value. 
When you establish an Amazon Web Services account, the account has initial quotas on the maximum number of configuration sets, opt-out lists, phone numbers, and pools that you can create in a given Region. For more information see Amazon Pinpoint quotas in the Amazon Pinpoint Developer Guide. + /// Describes the current AWS End User Messaging SMS and Voice SMS Voice V2 resource quotas for your account. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value. When you establish an Amazon Web Services account, the account has initial quotas on the maximum number of configuration sets, opt-out lists, phone numbers, and pools that you can create in a given Region. For more information see Quotas in the AWS End User Messaging SMS User Guide. @Sendable public func describeAccountLimits(_ input: DescribeAccountLimitsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAccountLimitsResult { return try await self.client.execute( @@ -503,7 +503,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Describes the specified keywords or all keywords on your origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, Amazon Pinpoint responds with a customizable message. If you specify a keyword that isn't valid, an error is returned. + /// Describes the specified keywords or all keywords on your origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. 
When your number receives a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message. If you specify a keyword that isn't valid, an error is returned. @Sendable public func describeKeywords(_ input: DescribeKeywordsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeKeywordsResult { return try await self.client.execute( @@ -685,7 +685,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Describes the current Amazon Pinpoint monthly spend limits for sending voice and text messages. When you establish an Amazon Web Services account, the account has initial monthly spend limit in a given Region. For more information on increasing your monthly spend limit, see Requesting increases to your monthly SMS spending quota for Amazon Pinpoint in the Amazon Pinpoint User Guide. + /// Describes the current monthly spend limits for sending voice and text messages. When you establish an Amazon Web Services account, the account has initial monthly spend limit in a given Region. For more information on increasing your monthly spend limit, see Requesting increases to your monthly SMS, MMS, or Voice spending quota in the AWS End User Messaging SMS User Guide. @Sendable public func describeSpendLimits(_ input: DescribeSpendLimitsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeSpendLimitsResult { return try await self.client.execute( @@ -802,7 +802,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Creates or updates a keyword configuration on an origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, Amazon Pinpoint responds with a customizable message. 
If you specify a keyword that isn't valid, an error is returned. + /// Creates or updates a keyword configuration on an origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message. If you specify a keyword that isn't valid, an error is returned. @Sendable public func putKeyword(_ input: PutKeywordRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutKeywordResult { return try await self.client.execute( @@ -867,7 +867,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Request an origination phone number for use in your account. For more information on phone number request see Requesting a number in the Amazon Pinpoint User Guide. + /// Request an origination phone number for use in your account. For more information on phone number request see Request a phone number in the AWS End User Messaging SMS User Guide. @Sendable public func requestPhoneNumber(_ input: RequestPhoneNumberRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RequestPhoneNumberResult { return try await self.client.execute( @@ -919,7 +919,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Creates a new text message and sends it to a recipient's phone number. SMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit depends on the destination country of your messages, as well as the type of phone number (origination number) that you use to send the message. For more information, see Message Parts per Second (MPS) limits in the Amazon Pinpoint User Guide. + /// Creates a new text message and sends it to a recipient's phone number. 
SendTextMessage only sends an SMS message to one recipient each time it is invoked. SMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit depends on the destination country of your messages, as well as the type of phone number (origination number) that you use to send the message. For more information about MPS, see Message Parts per Second (MPS) limits in the AWS End User Messaging SMS User Guide. @Sendable public func sendTextMessage(_ input: SendTextMessageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SendTextMessageResult { return try await self.client.execute( @@ -932,7 +932,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Allows you to send a request that sends a voice message through Amazon Pinpoint. This operation uses Amazon Polly to convert a text script into a voice message. + /// Allows you to send a request that sends a voice message. This operation uses Amazon Polly to convert a text script into a voice message. @Sendable public func sendVoiceMessage(_ input: SendVoiceMessageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SendVoiceMessageResult { return try await self.client.execute( @@ -1036,7 +1036,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Adds or overwrites only the specified tags for the specified Amazon Pinpoint SMS Voice, version 2 resource. When you specify an existing tag key, the value is overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer Guide. + /// Adds or overwrites only the specified tags for the specified resource. When you specify an existing tag key, the value is overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. 
Tag keys must be unique per resource. For more information about tags, see Tags in the AWS End User Messaging SMS User Guide. @Sendable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResult { return try await self.client.execute( @@ -1049,7 +1049,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Removes the association of the specified tags from an Amazon Pinpoint SMS Voice V2 resource. For more information on tags see Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer Guide. + /// Removes the association of the specified tags from a resource. For more information on tags see Tags in the AWS End User Messaging SMS User Guide. @Sendable public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResult { return try await self.client.execute( @@ -1062,7 +1062,7 @@ public struct PinpointSMSVoiceV2: AWSService { ) } - /// Updates an existing event destination in a configuration set. You can update the IAM role ARN for CloudWatch Logs and Kinesis Data Firehose. You can also enable or disable the event destination. You may want to update an event destination to change its matching event types or updating the destination resource ARN. You can't change an event destination's type between CloudWatch Logs, Kinesis Data Firehose, and Amazon SNS. + /// Updates an existing event destination in a configuration set. You can update the IAM role ARN for CloudWatch Logs and Firehose. You can also enable or disable the event destination. You may want to update an event destination to change its matching event types or updating the destination resource ARN. You can't change an event destination's type between CloudWatch Logs, Firehose, and Amazon SNS. 
@Sendable public func updateEventDestination(_ input: UpdateEventDestinationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEventDestinationResult { return try await self.client.execute( diff --git a/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_shapes.swift b/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_shapes.swift index 93abcfd0a5..1866bd2e0d 100644 --- a/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_shapes.swift +++ b/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_shapes.swift @@ -742,9 +742,9 @@ extension PinpointSMSVoiceV2 { public let configurationSetName: String /// The name that identifies the event destination. public let eventDestinationName: String - /// An object that contains information about an event destination for logging to Amazon Kinesis Data Firehose. + /// An object that contains information about an event destination for logging to Amazon Data Firehose. public let kinesisFirehoseDestination: KinesisFirehoseDestination? - /// An array of event types that determine which events to log. If "ALL" is used, then Amazon Pinpoint logs every event type. The TEXT_SENT event type is not supported. + /// An array of event types that determine which events to log. If "ALL" is used, then AWS End User Messaging SMS and Voice logs every event type. The TEXT_SENT event type is not supported. public let matchingEventTypes: [EventType] /// An object that contains information about an event destination for logging to Amazon SNS. public let snsDestination: SnsDestination? @@ -874,9 +874,9 @@ extension PinpointSMSVoiceV2 { public let deletionProtectionEnabled: Bool? /// The new two-character code, in ISO 3166-1 alpha-2 format, for the country or region of the new pool. public let isoCountryCode: String - /// The type of message. Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. 
+ /// The type of message. Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. After the pool is created the MessageType can't be changed. public let messageType: MessageType - /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. + /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity. public let originationIdentity: String /// An array of tags (key and value pairs) associated with the pool. public let tags: [Tag]? @@ -929,9 +929,9 @@ extension PinpointSMSVoiceV2 { public let poolArn: String? /// The unique identifier for the pool. public let poolId: String? - /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. + /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. 
You're also responsible for tracking and honoring opt-out requests. public let selfManagedOptOutsEnabled: Bool? - /// Indicates whether shared routes are enabled for the pool. + /// Indicates whether shared routes are enabled for the pool. Set to false and only origination identities in this pool are used to send messages. public let sharedRoutesEnabled: Bool? /// The current status of the pool. CREATING: The pool is currently being created and isn't yet available for use. ACTIVE: The pool is active and available for use. DELETING: The pool is being deleted. public let status: PoolStatus? @@ -1764,7 +1764,7 @@ extension PinpointSMSVoiceV2 { public let poolArn: String? /// The PoolId of the pool that was deleted. public let poolId: String? - /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. + /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. public let selfManagedOptOutsEnabled: Bool? /// Indicates whether shared routes are enabled for the pool. public let sharedRoutesEnabled: Bool? @@ -3449,7 +3449,7 @@ extension PinpointSMSVoiceV2 { public let enabled: Bool /// The name of the EventDestination. public let eventDestinationName: String - /// An object that contains information about an event destination for logging to Amazon Kinesis Data Firehose. 
+ /// An object that contains information about an event destination for logging to Amazon Data Firehose. public let kinesisFirehoseDestination: KinesisFirehoseDestination? /// An array of event types that determine which events to log. The TEXT_SENT event type is not supported. public let matchingEventTypes: [EventType] @@ -3499,7 +3499,7 @@ extension PinpointSMSVoiceV2 { } public struct GetProtectConfigurationCountryRuleSetResult: AWSDecodableShape { - /// A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide. + /// A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide. public let countryRuleSet: [String: ProtectConfigurationCountryRuleSetInformation] /// The capability type associated with the returned ProtectConfigurationCountryRuleSetInformation objects. public let numberCapability: NumberCapability @@ -3574,7 +3574,7 @@ extension PinpointSMSVoiceV2 { public struct KinesisFirehoseDestination: AWSEncodableShape & AWSDecodableShape { /// The Amazon Resource Name (ARN) of the delivery stream. public let deliveryStreamArn: String - /// The ARN of an Identity and Access Management role that is able to write event data to an Amazon Kinesis Data Firehose destination. + /// The ARN of an Identity and Access Management role that is able to write event data to an Amazon Data Firehose destination. public let iamRoleArn: String public init(deliveryStreamArn: String, iamRoleArn: String) { @@ -3919,7 +3919,7 @@ extension PinpointSMSVoiceV2 { public let poolId: String? 
/// The unique identifier for the registration. public let registrationId: String? - /// When set to false an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out request. For more information see Self-managed opt-outs + /// When set to false an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out request. For more information see Self-managed opt-outs public let selfManagedOptOutsEnabled: Bool /// The current status of the phone number. public let status: NumberStatus @@ -4013,9 +4013,9 @@ extension PinpointSMSVoiceV2 { public let poolArn: String /// The unique identifier for the pool. public let poolId: String - /// When set to false, an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. For more information see Self-managed opt-outs + /// When set to false, an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. 
You're also responsible for tracking and honoring opt-out requests. For more information see Self-managed opt-outs public let selfManagedOptOutsEnabled: Bool - /// Allows you to enable shared routes on your pool. By default, this is set to False. If you set this value to True, your messages are sent using phone numbers or sender IDs (depending on the country) that are shared with other Amazon Pinpoint users. In some countries, such as the United States, senders aren't allowed to use shared routes and must use a dedicated phone number or short code. + /// Allows you to enable shared routes on your pool. By default, this is set to False. If you set this value to True, your messages are sent using phone numbers or sender IDs (depending on the country) that are shared with other users. In some countries, such as the United States, senders aren't allowed to use shared routes and must use a dedicated phone number or short code. public let sharedRoutesEnabled: Bool /// The current status of the pool. public let status: PoolStatus @@ -4937,7 +4937,7 @@ extension PinpointSMSVoiceV2 { public let phoneNumberId: String? /// The unique identifier for the registration. public let registrationId: String? - /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. + /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. 
You're also responsible for tracking and honoring opt-out requests. public let selfManagedOptOutsEnabled: Bool? /// The current status of the request. public let status: NumberStatus? @@ -5150,7 +5150,7 @@ extension PinpointSMSVoiceV2 { public let poolId: String? /// The unique identifier for the registration. public let registrationId: String? - /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. + /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. public let selfManagedOptOutsEnabled: Bool? /// The current status of the request. public let status: NumberStatus? @@ -5524,15 +5524,15 @@ extension PinpointSMSVoiceV2 { public let configurationSetName: String? /// You can specify custom data in this field. If you do, that data is logged to the event destination. public let context: [String: String]? - /// This field is used for any country-specific registration requirements. Currently, this setting is only used when you send messages to recipients in India using a sender ID. For more information see Special requirements for sending SMS messages to recipients in India. + /// This field is used for any country-specific registration requirements. Currently, this setting is only used when you send messages to recipients in India using a sender ID. 
For more information see Special requirements for sending SMS messages to recipients in India. IN_ENTITY_ID The entity ID or Principal Entity (PE) ID that you received after completing the sender ID registration process. IN_TEMPLATE_ID The template ID that you received after completing the sender ID registration process. Make sure that the Template ID that you specify matches your message template exactly. If your message doesn't match the template that you provided during the registration process, the mobile carriers might reject your message. public let destinationCountryParameters: [DestinationCountryParameterKey: String]? /// The destination phone number in E.164 format. public let destinationPhoneNumber: String - /// When set to true, the message is checked and validated, but isn't sent to the end recipient. + /// When set to true, the message is checked and validated, but isn't sent to the end recipient. You are not charged for using DryRun. The Message Parts per Second (MPS) limit when using DryRun is five. If your origination identity has a lower MPS limit then the lower MPS limit is used. For more information about MPS limits, see Message Parts per Second (MPS) limits in the AWS End User Messaging SMS User Guide. public let dryRun: Bool? /// When you register a short code in the US, you must specify a program name. If you don’t have a US short code, omit this attribute. public let keyword: String? - /// The maximum amount that you want to spend, in US dollars, per each text message part. A text message can contain multiple parts. + /// The maximum amount that you want to spend, in US dollars, per each text message. If the calculated amount to send the text message is greater than MaxPrice, the message is not sent and an error is returned. public let maxPrice: String? /// The body of the text message. public let messageBody: String? @@ -5542,7 +5542,7 @@ extension PinpointSMSVoiceV2 { public let originationIdentity: String?
/// The unique identifier for the protect configuration. public let protectConfigurationId: String? - /// How long the text message is valid for. By default this is 72 hours. + /// How long the text message is valid for, in seconds. By default this is 72 hours. If the message isn't handed off before the TTL expires we stop attempting to hand off the message and return a TTL_EXPIRED event. public let timeToLive: Int? public init(configurationSetName: String? = nil, context: [String: String]? = nil, destinationCountryParameters: [DestinationCountryParameterKey: String]? = nil, destinationPhoneNumber: String, dryRun: Bool? = nil, keyword: String? = nil, maxPrice: String? = nil, messageBody: String? = nil, messageType: MessageType? = nil, originationIdentity: String? = nil, protectConfigurationId: String? = nil, timeToLive: Int? = nil) { @@ -6280,7 +6280,7 @@ extension PinpointSMSVoiceV2 { public let enabled: Bool? /// The name to use for the event destination. public let eventDestinationName: String - /// An object that contains information about an event destination for logging to Kinesis Data Firehose. + /// An object that contains information about an event destination for logging to Firehose. public let kinesisFirehoseDestination: KinesisFirehoseDestination? /// An array of event types that determine which events to log. The TEXT_SENT event type is not supported. public let matchingEventTypes: [EventType]?
When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. + /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. public let selfManagedOptOutsEnabled: Bool? /// The Amazon Resource Name (ARN) of the two way channel. public let twoWayChannelArn: String? @@ -6479,7 +6479,7 @@ extension PinpointSMSVoiceV2 { public let optOutListName: String? /// The unique identifier of the pool to update. Valid values are either the PoolId or PoolArn. public let poolId: String - /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. + /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. public let selfManagedOptOutsEnabled: Bool? /// Indicates whether shared routes are enabled for the pool. public let sharedRoutesEnabled: Bool? @@ -6541,7 +6541,7 @@ extension PinpointSMSVoiceV2 { public let poolArn: String? /// The unique identifier of the pool. 
public let poolId: String? - /// When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. + /// When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. public let selfManagedOptOutsEnabled: Bool? /// Indicates whether shared routes are enabled for the pool. public let sharedRoutesEnabled: Bool? @@ -6586,7 +6586,7 @@ extension PinpointSMSVoiceV2 { } public struct UpdateProtectConfigurationCountryRuleSetRequest: AWSEncodableShape { - /// A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide. + /// A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide. public let countryRuleSetUpdates: [String: ProtectConfigurationCountryRuleSetInformation] /// The number capability to apply the CountryRuleSetUpdates updates to. 
public let numberCapability: NumberCapability diff --git a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift index 9e22ecf9f9..487a6b210b 100644 --- a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift +++ b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift @@ -38869,6 +38869,7 @@ public struct QuickSightErrorType: AWSErrorType { case accessDeniedException = "AccessDeniedException" case concurrentUpdatingException = "ConcurrentUpdatingException" case conflictException = "ConflictException" + case customerManagedKeyUnavailableException = "CustomerManagedKeyUnavailableException" case domainNotWhitelistedException = "DomainNotWhitelistedException" case identityTypeNotSupportedException = "IdentityTypeNotSupportedException" case internalFailureException = "InternalFailureException" @@ -38915,6 +38916,8 @@ public struct QuickSightErrorType: AWSErrorType { public static var concurrentUpdatingException: Self { .init(.concurrentUpdatingException) } /// Updating or deleting a resource can cause an inconsistent state. public static var conflictException: Self { .init(.conflictException) } + /// The customer managed key that is registered to your Amazon QuickSight account is unavailable. + public static var customerManagedKeyUnavailableException: Self { .init(.customerManagedKeyUnavailableException) } /// The domain specified isn't on the allow list. All domains for embedded dashboards must be /// added to the approved list by an Amazon QuickSight admin. public static var domainNotWhitelistedException: Self { .init(.domainNotWhitelistedException) } diff --git a/Sources/Soto/Services/RDS/RDS_shapes.swift b/Sources/Soto/Services/RDS/RDS_shapes.swift index 1a5a033cd3..42c7191f1a 100644 --- a/Sources/Soto/Services/RDS/RDS_shapes.swift +++ b/Sources/Soto/Services/RDS/RDS_shapes.swift @@ -2282,14 +2282,17 @@ extension RDS { public let dbShardGroupIdentifier: String? 
/// The maximum capacity of the DB shard group in Aurora capacity units (ACUs). public let maxACU: Double? + /// The minimum capacity of the DB shard group in Aurora capacity units (ACUs). + public let minACU: Double? /// Specifies whether the DB shard group is publicly accessible. When the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB shard group's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB shard group's VPC. Access to the DB shard group is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB shard group doesn't permit it. When the DB shard group isn't publicly accessible, it is an internal DB shard group with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB shard group is private. If the default VPC in the target Region has an internet gateway attached to it, the DB shard group is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB shard group is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB shard group is public. public let publiclyAccessible: Bool? - public init(computeRedundancy: Int? = nil, dbClusterIdentifier: String? = nil, dbShardGroupIdentifier: String? = nil, maxACU: Double? = nil, publiclyAccessible: Bool? = nil) { + public init(computeRedundancy: Int? = nil, dbClusterIdentifier: String? = nil, dbShardGroupIdentifier: String? = nil, maxACU: Double? = nil, minACU: Double? 
= nil, publiclyAccessible: Bool? = nil) { self.computeRedundancy = computeRedundancy self.dbClusterIdentifier = dbClusterIdentifier self.dbShardGroupIdentifier = dbShardGroupIdentifier self.maxACU = maxACU + self.minACU = minACU self.publiclyAccessible = publiclyAccessible } @@ -2298,6 +2301,7 @@ extension RDS { case dbClusterIdentifier = "DBClusterIdentifier" case dbShardGroupIdentifier = "DBShardGroupIdentifier" case maxACU = "MaxACU" + case minACU = "MinACU" case publiclyAccessible = "PubliclyAccessible" } } @@ -4961,18 +4965,21 @@ extension RDS { public let endpoint: String? /// The maximum capacity of the DB shard group in Aurora capacity units (ACUs). public let maxACU: Double? + /// The minimum capacity of the DB shard group in Aurora capacity units (ACUs). + public let minACU: Double? /// Indicates whether the DB shard group is publicly accessible. When the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB shard group's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB shard group's VPC. Access to the DB shard group is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB shard group doesn't permit it. When the DB shard group isn't publicly accessible, it is an internal DB shard group with a DNS name that resolves to a private IP address. For more information, see CreateDBShardGroup. This setting is only for Aurora Limitless Database. public let publiclyAccessible: Bool? /// The status of the DB shard group. public let status: String? - public init(computeRedundancy: Int? = nil, dbClusterIdentifier: String? = nil, dbShardGroupIdentifier: String? = nil, dbShardGroupResourceId: String? = nil, endpoint: String? = nil, maxACU: Double? = nil, publiclyAccessible: Bool? = nil, status: String? = nil) { + public init(computeRedundancy: Int? 
= nil, dbClusterIdentifier: String? = nil, dbShardGroupIdentifier: String? = nil, dbShardGroupResourceId: String? = nil, endpoint: String? = nil, maxACU: Double? = nil, minACU: Double? = nil, publiclyAccessible: Bool? = nil, status: String? = nil) { self.computeRedundancy = computeRedundancy self.dbClusterIdentifier = dbClusterIdentifier self.dbShardGroupIdentifier = dbShardGroupIdentifier self.dbShardGroupResourceId = dbShardGroupResourceId self.endpoint = endpoint self.maxACU = maxACU + self.minACU = minACU self.publiclyAccessible = publiclyAccessible self.status = status } @@ -4984,6 +4991,7 @@ extension RDS { case dbShardGroupResourceId = "DBShardGroupResourceId" case endpoint = "Endpoint" case maxACU = "MaxACU" + case minACU = "MinACU" case publiclyAccessible = "PubliclyAccessible" case status = "Status" } @@ -5452,11 +5460,11 @@ extension RDS { public struct DeleteDBClusterMessage: AWSEncodableShape { /// The DB cluster identifier for the DB cluster to be deleted. This parameter isn't case-sensitive. Constraints: Must match an existing DBClusterIdentifier. public let dbClusterIdentifier: String? - /// Specifies whether to remove automated backups immediately after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted. You must delete automated backups for Amazon RDS Multi-AZ DB clusters. For more information about managing automated backups for RDS Multi-AZ DB clusters, see Managing automated backups. + /// Specifies whether to remove automated backups immediately after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted. public let deleteAutomatedBackups: Bool? - /// The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is disabled. 
Specifying this parameter and also skipping the creation of a final DB cluster snapshot with the SkipFinalShapshot parameter results in an error. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter Can't end with a hyphen or contain two consecutive hyphens + /// The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is disabled. If you specify this parameter and also skip the creation of a final DB cluster snapshot with the SkipFinalShapshot parameter, the request results in an error. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter Can't end with a hyphen or contain two consecutive hyphens public let finalDBSnapshotIdentifier: String? - /// Specifies whether to skip the creation of a final DB cluster snapshot before the DB cluster is deleted. If skip is specified, no DB cluster snapshot is created. If skip isn't specified, a DB cluster snapshot is created before the DB cluster is deleted. By default, skip isn't specified, and the DB cluster snapshot is created. By default, this parameter is disabled. You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is disabled. + /// Specifies whether to skip the creation of a final DB cluster snapshot before RDS deletes the DB cluster. If you set this value to true, RDS doesn't create a final DB cluster snapshot. If you set this value to false or don't specify it, RDS creates a DB cluster snapshot before it deletes the DB cluster. By default, this parameter is disabled, so RDS creates a final DB cluster snapshot. If SkipFinalSnapshot is disabled, you must specify a value for the FinalDBSnapshotIdentifier parameter. public let skipFinalSnapshot: Bool? public init(dbClusterIdentifier: String? = nil, deleteAutomatedBackups: Bool? = nil, finalDBSnapshotIdentifier: String? = nil, skipFinalSnapshot: Bool? = nil) { @@ -6103,7 +6111,7 @@ extension RDS { public let marker: String? 
/// The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. public let maxRecords: Int? - /// A specific source to return parameters for. Valid Values: user engine service + /// A specific source to return parameters for. Valid Values: customer engine service public let source: String? public init(dbClusterParameterGroupName: String? = nil, filters: [Filter]? = nil, marker: String? = nil, maxRecords: Int? = nil, source: String? = nil) { @@ -9485,10 +9493,13 @@ extension RDS { public let dbShardGroupIdentifier: String? /// The maximum capacity of the DB shard group in Aurora capacity units (ACUs). public let maxACU: Double? + /// The minimum capacity of the DB shard group in Aurora capacity units (ACUs). + public let minACU: Double? - public init(dbShardGroupIdentifier: String? = nil, maxACU: Double? = nil) { + public init(dbShardGroupIdentifier: String? = nil, maxACU: Double? = nil, minACU: Double? = nil) { self.dbShardGroupIdentifier = dbShardGroupIdentifier self.maxACU = maxACU + self.minACU = minACU } public func validate(name: String) throws { @@ -9500,6 +9511,7 @@ extension RDS { private enum CodingKeys: String, CodingKey { case dbShardGroupIdentifier = "DBShardGroupIdentifier" case maxACU = "MaxACU" + case minACU = "MinACU" } } @@ -9873,7 +9885,7 @@ extension RDS { public struct _OptionSettingsEncoding: ArrayCoderProperties { public static let member = "OptionSetting" } public struct _VpcSecurityGroupMembershipsEncoding: ArrayCoderProperties { public static let member = "VpcSecurityGroupId" } - /// A list of DBSecurityGroupMembership name strings used for this option. + /// A list of DB security groups used for this option. @OptionalCustomCoding> public var dbSecurityGroupMemberships: [String]? 
/// The configuration of options to include in a group. @@ -9885,7 +9897,7 @@ extension RDS { public let optionVersion: String? /// The optional port for the option. public let port: Int? - /// A list of VpcSecurityGroupMembership name strings used for this option. + /// A list of VPC security group names used for this option. @OptionalCustomCoding> public var vpcSecurityGroupMemberships: [String]? diff --git a/Sources/Soto/Services/RedshiftServerless/RedshiftServerless_shapes.swift b/Sources/Soto/Services/RedshiftServerless/RedshiftServerless_shapes.swift index 5f0a1b6717..1c5cf42168 100644 --- a/Sources/Soto/Services/RedshiftServerless/RedshiftServerless_shapes.swift +++ b/Sources/Soto/Services/RedshiftServerless/RedshiftServerless_shapes.swift @@ -410,7 +410,7 @@ extension RedshiftServerless { public let endTime: Date? /// The name of the namespace for which to create a scheduled action. public let namespaceName: String - /// The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster Management Guide + /// The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. 
For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management Guide public let roleArn: String /// The schedule for a one-time (at timestamp format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour. Times are in UTC. Format of at timestamp is yyyy-mm-ddThh:mm:ss. For example, 2016-03-04T17:27:00. Format of cron expression is (Minutes Hours Day-of-month Month Day-of-week Year). For example, "(0 10 ? * MON *)". For more information, see Cron Expressions in the Amazon CloudWatch Events User Guide. public let schedule: Schedule @@ -645,6 +645,8 @@ extension RedshiftServerless { public let configParameters: [ConfigParameter]? /// The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC instead of over the internet. public let enhancedVpcRouting: Bool? + /// The IP address type that the workgroup supports. Possible values are ipv4 and dualstack. + public let ipAddressType: String? /// The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs. public let maxCapacity: Int? /// The name of the namespace to associate with the workgroup. @@ -662,10 +664,11 @@ extension RedshiftServerless { /// The name of the created workgroup. public let workgroupName: String - public init(baseCapacity: Int? = nil, configParameters: [ConfigParameter]? = nil, enhancedVpcRouting: Bool? = nil, maxCapacity: Int? = nil, namespaceName: String, port: Int? = nil, publiclyAccessible: Bool? = nil, securityGroupIds: [String]? = nil, subnetIds: [String]? = nil, tags: [Tag]? = nil, workgroupName: String) { + public init(baseCapacity: Int? = nil, configParameters: [ConfigParameter]? = nil, enhancedVpcRouting: Bool? = nil, ipAddressType: String? = nil, maxCapacity: Int? 
= nil, namespaceName: String, port: Int? = nil, publiclyAccessible: Bool? = nil, securityGroupIds: [String]? = nil, subnetIds: [String]? = nil, tags: [Tag]? = nil, workgroupName: String) { self.baseCapacity = baseCapacity self.configParameters = configParameters self.enhancedVpcRouting = enhancedVpcRouting + self.ipAddressType = ipAddressType self.maxCapacity = maxCapacity self.namespaceName = namespaceName self.port = port @@ -677,6 +680,7 @@ extension RedshiftServerless { } public func validate(name: String) throws { + try self.validate(self.ipAddressType, name: "ipAddressType", parent: name, pattern: "^(ipv4|dualstack)$") try self.validate(self.namespaceName, name: "namespaceName", parent: name, max: 64) try self.validate(self.namespaceName, name: "namespaceName", parent: name, min: 3) try self.validate(self.namespaceName, name: "namespaceName", parent: name, pattern: "^[a-z0-9-]+$") @@ -693,6 +697,7 @@ extension RedshiftServerless { case baseCapacity = "baseCapacity" case configParameters = "configParameters" case enhancedVpcRouting = "enhancedVpcRouting" + case ipAddressType = "ipAddressType" case maxCapacity = "maxCapacity" case namespaceName = "namespaceName" case port = "port" @@ -2056,6 +2061,8 @@ extension RedshiftServerless { public struct NetworkInterface: AWSDecodableShape { /// The availability Zone. public let availabilityZone: String? + /// The IPv6 address of the network interface within the subnet. + public let ipv6Address: String? /// The unique identifier of the network interface. public let networkInterfaceId: String? /// The IPv4 address of the network interface within the subnet. @@ -2063,8 +2070,9 @@ extension RedshiftServerless { /// The unique identifier of the subnet. public let subnetId: String? - public init(availabilityZone: String? = nil, networkInterfaceId: String? = nil, privateIpAddress: String? = nil, subnetId: String? = nil) { + public init(availabilityZone: String? = nil, ipv6Address: String? = nil, networkInterfaceId: String? 
= nil, privateIpAddress: String? = nil, subnetId: String? = nil) { self.availabilityZone = availabilityZone + self.ipv6Address = ipv6Address self.networkInterfaceId = networkInterfaceId self.privateIpAddress = privateIpAddress self.subnetId = subnetId @@ -2072,6 +2080,7 @@ extension RedshiftServerless { private enum CodingKeys: String, CodingKey { case availabilityZone = "availabilityZone" + case ipv6Address = "ipv6Address" case networkInterfaceId = "networkInterfaceId" case privateIpAddress = "privateIpAddress" case subnetId = "subnetId" @@ -2418,7 +2427,7 @@ extension RedshiftServerless { public let namespaceName: String? /// An array of timestamps of when the next scheduled actions will trigger. public let nextInvocations: [Date]? - /// The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster Management Guide + /// The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management Guide public let roleArn: String? /// The schedule for a one-time (at timestamp format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour. 
Times are in UTC. Format of at timestamp is yyyy-mm-ddThh:mm:ss. For example, 2016-03-04T17:27:00. Format of cron expression is (Minutes Hours Day-of-month Month Day-of-week Year). For example, "(0 10 ? * MON *)". For more information, see Cron Expressions in the Amazon CloudWatch Events User Guide. public let schedule: Schedule? @@ -2908,7 +2917,7 @@ extension RedshiftServerless { public let enabled: Bool? /// The end time in UTC of the scheduled action to update. public let endTime: Date? - /// The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster Management Guide + /// The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management Guide public let roleArn: String? /// The schedule for a one-time (at timestamp format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour. Times are in UTC. Format of at timestamp is yyyy-mm-ddThh:mm:ss. For example, 2016-03-04T17:27:00. Format of cron expression is (Minutes Hours Day-of-month Month Day-of-week Year). For example, "(0 10 ? * MON *)". 
For more information, see Cron Expressions in the Amazon CloudWatch Events User Guide. public let schedule: Schedule? @@ -3064,6 +3073,8 @@ extension RedshiftServerless { public let configParameters: [ConfigParameter]? /// The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC. public let enhancedVpcRouting: Bool? + /// The IP address type that the workgroup supports. Possible values are ipv4 and dualstack. + public let ipAddressType: String? /// The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs. public let maxCapacity: Int? /// The custom port to use when connecting to a workgroup. Valid port ranges are 5431-5455 and 8191-8215. The default is 5439. @@ -3077,10 +3088,11 @@ extension RedshiftServerless { /// The name of the workgroup to update. You can't update the name of a workgroup once it is created. public let workgroupName: String - public init(baseCapacity: Int? = nil, configParameters: [ConfigParameter]? = nil, enhancedVpcRouting: Bool? = nil, maxCapacity: Int? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, securityGroupIds: [String]? = nil, subnetIds: [String]? = nil, workgroupName: String) { + public init(baseCapacity: Int? = nil, configParameters: [ConfigParameter]? = nil, enhancedVpcRouting: Bool? = nil, ipAddressType: String? = nil, maxCapacity: Int? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, securityGroupIds: [String]? = nil, subnetIds: [String]? 
= nil, workgroupName: String) { self.baseCapacity = baseCapacity self.configParameters = configParameters self.enhancedVpcRouting = enhancedVpcRouting + self.ipAddressType = ipAddressType self.maxCapacity = maxCapacity self.port = port self.publiclyAccessible = publiclyAccessible @@ -3090,6 +3102,7 @@ extension RedshiftServerless { } public func validate(name: String) throws { + try self.validate(self.ipAddressType, name: "ipAddressType", parent: name, pattern: "^(ipv4|dualstack)$") try self.validate(self.workgroupName, name: "workgroupName", parent: name, max: 64) try self.validate(self.workgroupName, name: "workgroupName", parent: name, min: 3) try self.validate(self.workgroupName, name: "workgroupName", parent: name, pattern: "^[a-z0-9-]+$") @@ -3099,6 +3112,7 @@ extension RedshiftServerless { case baseCapacity = "baseCapacity" case configParameters = "configParameters" case enhancedVpcRouting = "enhancedVpcRouting" + case ipAddressType = "ipAddressType" case maxCapacity = "maxCapacity" case port = "port" case publiclyAccessible = "publiclyAccessible" @@ -3215,6 +3229,8 @@ extension RedshiftServerless { public let endpoint: Endpoint? /// The value that specifies whether to enable enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC. public let enhancedVpcRouting: Bool? + /// The IP address type that the workgroup supports. Possible values are ipv4 and dualstack. + public let ipAddressType: String? /// The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs. public let maxCapacity: Int? /// The namespace the workgroup is associated with. @@ -3240,7 +3256,7 @@ extension RedshiftServerless { /// The Amazon Redshift Serverless version of your workgroup. For more information about Amazon Redshift Serverless versions, seeCluster versions for Amazon Redshift. public let workgroupVersion: String? - public init(baseCapacity: Int? 
= nil, configParameters: [ConfigParameter]? = nil, creationDate: Date? = nil, crossAccountVpcs: [String]? = nil, customDomainCertificateArn: String? = nil, customDomainCertificateExpiryTime: Date? = nil, customDomainName: String? = nil, endpoint: Endpoint? = nil, enhancedVpcRouting: Bool? = nil, maxCapacity: Int? = nil, namespaceName: String? = nil, patchVersion: String? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, securityGroupIds: [String]? = nil, status: WorkgroupStatus? = nil, subnetIds: [String]? = nil, workgroupArn: String? = nil, workgroupId: String? = nil, workgroupName: String? = nil, workgroupVersion: String? = nil) { + public init(baseCapacity: Int? = nil, configParameters: [ConfigParameter]? = nil, creationDate: Date? = nil, crossAccountVpcs: [String]? = nil, customDomainCertificateArn: String? = nil, customDomainCertificateExpiryTime: Date? = nil, customDomainName: String? = nil, endpoint: Endpoint? = nil, enhancedVpcRouting: Bool? = nil, ipAddressType: String? = nil, maxCapacity: Int? = nil, namespaceName: String? = nil, patchVersion: String? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, securityGroupIds: [String]? = nil, status: WorkgroupStatus? = nil, subnetIds: [String]? = nil, workgroupArn: String? = nil, workgroupId: String? = nil, workgroupName: String? = nil, workgroupVersion: String? 
= nil) { self.baseCapacity = baseCapacity self.configParameters = configParameters self.creationDate = creationDate @@ -3250,6 +3266,7 @@ extension RedshiftServerless { self.customDomainName = customDomainName self.endpoint = endpoint self.enhancedVpcRouting = enhancedVpcRouting + self.ipAddressType = ipAddressType self.maxCapacity = maxCapacity self.namespaceName = namespaceName self.patchVersion = patchVersion @@ -3274,6 +3291,7 @@ extension RedshiftServerless { case customDomainName = "customDomainName" case endpoint = "endpoint" case enhancedVpcRouting = "enhancedVpcRouting" + case ipAddressType = "ipAddressType" case maxCapacity = "maxCapacity" case namespaceName = "namespaceName" case patchVersion = "patchVersion" @@ -3316,6 +3334,7 @@ public struct RedshiftServerlessErrorType: AWSErrorType { case insufficientCapacityException = "InsufficientCapacityException" case internalServerException = "InternalServerException" case invalidPaginationException = "InvalidPaginationException" + case ipv6CidrBlockNotFoundException = "Ipv6CidrBlockNotFoundException" case resourceNotFoundException = "ResourceNotFoundException" case serviceQuotaExceededException = "ServiceQuotaExceededException" case throttlingException = "ThrottlingException" @@ -3351,6 +3370,8 @@ public struct RedshiftServerlessErrorType: AWSErrorType { public static var internalServerException: Self { .init(.internalServerException) } /// The provided pagination token is invalid. public static var invalidPaginationException: Self { .init(.invalidPaginationException) } + /// There are no subnets in your VPC with associated IPv6 CIDR blocks. To use dual-stack mode, associate an IPv6 CIDR block with each subnet in your VPC. + public static var ipv6CidrBlockNotFoundException: Self { .init(.ipv6CidrBlockNotFoundException) } /// The resource could not be found. public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } /// The service limit was exceeded. 
diff --git a/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift b/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift index 7c9b39cbbd..ec0c60ac66 100644 --- a/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift +++ b/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift @@ -81,7 +81,20 @@ public struct Resiliencehub: AWSService { // MARK: API Calls - /// Adds the source of resource-maps to the draft version of an application. During assessment, Resilience Hub will use these resource-maps to resolve the latest physical ID for each resource in the application template. For more information about different types of resources suported by Resilience Hub and how to add them in your application, see Step 2: How is your application managed? in the Resilience Hub User Guide. + /// Accepts the resource grouping recommendations suggested by Resilience Hub for your application. + @Sendable + public func acceptResourceGroupingRecommendations(_ input: AcceptResourceGroupingRecommendationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AcceptResourceGroupingRecommendationsResponse { + return try await self.client.execute( + operation: "AcceptResourceGroupingRecommendations", + path: "/accept-resource-grouping-recommendations", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Adds the source of resource-maps to the draft version of an application. During assessment, Resilience Hub will use these resource-maps to resolve the latest physical ID for each resource in the application template. For more information about different types of resources supported by Resilience Hub and how to add them in your application, see Step 2: How is your application managed? in the Resilience Hub User Guide. 
@Sendable public func addDraftAppVersionResourceMappings(_ input: AddDraftAppVersionResourceMappingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AddDraftAppVersionResourceMappingsResponse { return try await self.client.execute( @@ -315,7 +328,7 @@ public struct Resiliencehub: AWSService { ) } - /// Describes a resource of the Resilience Hub application. This API accepts only one of the following parameters to descibe the resource: resourceName logicalResourceId physicalResourceId (Along with physicalResourceId, you can also provide awsAccountId, and awsRegion) + /// Describes a resource of the Resilience Hub application. This API accepts only one of the following parameters to describe the resource: resourceName logicalResourceId physicalResourceId (Along with physicalResourceId, you can also provide awsAccountId, and awsRegion) @Sendable public func describeAppVersionResource(_ input: DescribeAppVersionResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAppVersionResourceResponse { return try await self.client.execute( @@ -354,7 +367,7 @@ public struct Resiliencehub: AWSService { ) } - /// Describes the status of importing resources to an application version. If you get a 404 error with ResourceImportStatusNotFoundAppMetadataException, you must call importResourcesToDraftAppVersion after creating the application and before calling describeDraftAppVersionResourcesImportStatus to obtain the status. + /// Describes the status of importing resources to an application version. If you get a 404 error with ResourceImportStatusNotFoundAppMetadataException, you must call importResourcesToDraftAppVersion after creating the application and before calling describeDraftAppVersionResourcesImportStatus to obtain the status. 
@Sendable public func describeDraftAppVersionResourcesImportStatus(_ input: DescribeDraftAppVersionResourcesImportStatusRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeDraftAppVersionResourcesImportStatusResponse { return try await self.client.execute( @@ -380,6 +393,19 @@ public struct Resiliencehub: AWSService { ) } + /// Describes the resource grouping recommendation tasks run by Resilience Hub for your application. + @Sendable + public func describeResourceGroupingRecommendationTask(_ input: DescribeResourceGroupingRecommendationTaskRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeResourceGroupingRecommendationTaskResponse { + return try await self.client.execute( + operation: "DescribeResourceGroupingRecommendationTask", + path: "/describe-resource-grouping-recommendation-task", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Imports resources to Resilience Hub application draft version from different input sources. For more information about the input sources supported by Resilience Hub, see Discover the structure and describe your Resilience Hub application. @Sendable public func importResourcesToDraftAppVersion(_ input: ImportResourcesToDraftAppVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ImportResourcesToDraftAppVersionResponse { @@ -575,6 +601,19 @@ public struct Resiliencehub: AWSService { ) } + /// Lists the resource grouping recommendations suggested by Resilience Hub for your application. 
+ @Sendable + public func listResourceGroupingRecommendations(_ input: ListResourceGroupingRecommendationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListResourceGroupingRecommendationsResponse { + return try await self.client.execute( + operation: "ListResourceGroupingRecommendations", + path: "/list-resource-grouping-recommendations", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists the standard operating procedure (SOP) recommendations for the Resilience Hub applications. @Sendable public func listSopRecommendations(_ input: ListSopRecommendationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSopRecommendationsResponse { @@ -666,6 +705,19 @@ public struct Resiliencehub: AWSService { ) } + /// Rejects resource grouping recommendations. + @Sendable + public func rejectResourceGroupingRecommendations(_ input: RejectResourceGroupingRecommendationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RejectResourceGroupingRecommendationsResponse { + return try await self.client.execute( + operation: "RejectResourceGroupingRecommendations", + path: "/reject-resource-grouping-recommendations", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Removes resource mappings from a draft application version. @Sendable public func removeDraftAppVersionResourceMappings(_ input: RemoveDraftAppVersionResourceMappingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RemoveDraftAppVersionResourceMappingsResponse { @@ -705,6 +757,19 @@ public struct Resiliencehub: AWSService { ) } + /// Starts grouping recommendation task. 
+ @Sendable + public func startResourceGroupingRecommendationTask(_ input: StartResourceGroupingRecommendationTaskRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartResourceGroupingRecommendationTaskResponse { + return try await self.client.execute( + operation: "StartResourceGroupingRecommendationTask", + path: "/start-resource-grouping-recommendation-task", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Applies one or more tags to a resource. @Sendable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { return try await self.client.execute( @@ -1076,6 +1141,25 @@ extension Resiliencehub { ) } + /// Lists the resource grouping recommendations suggested by Resilience Hub for your application. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listResourceGroupingRecommendationsPaginator( + _ input: ListResourceGroupingRecommendationsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listResourceGroupingRecommendations, + inputKey: \ListResourceGroupingRecommendationsRequest.nextToken, + outputKey: \ListResourceGroupingRecommendationsResponse.nextToken, + logger: logger + ) + } + /// Lists the standard operating procedure (SOP) recommendations for the Resilience Hub applications. /// Return PaginatorSequence for operation.
/// @@ -1313,6 +1397,16 @@ extension Resiliencehub.ListResiliencyPoliciesRequest: AWSPaginateToken { } } +extension Resiliencehub.ListResourceGroupingRecommendationsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Resiliencehub.ListResourceGroupingRecommendationsRequest { + return .init( + appArn: self.appArn, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension Resiliencehub.ListSopRecommendationsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Resiliencehub.ListSopRecommendationsRequest { return .init( diff --git a/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift b/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift index 742cfe3eff..a3f48ebbcd 100644 --- a/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift +++ b/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift @@ -43,6 +43,8 @@ extension Resiliencehub { public enum AppComplianceStatusType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case changesDetected = "ChangesDetected" + case missingPolicy = "MissingPolicy" + case notApplicable = "NotApplicable" case notAssessed = "NotAssessed" case policyBreached = "PolicyBreached" case policyMet = "PolicyMet" @@ -77,6 +79,8 @@ extension Resiliencehub { } public enum ComplianceStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case missingPolicy = "MissingPolicy" + case notApplicable = "NotApplicable" case policyBreached = "PolicyBreached" case policyMet = "PolicyMet" public var description: String { return self.rawValue } @@ -156,6 +160,27 @@ extension Resiliencehub { public var description: String { return self.rawValue } } + public enum GroupingRecommendationConfidenceLevel: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case high = "High" + case medium = "Medium" + public var description: String { return self.rawValue } + } + + public 
enum GroupingRecommendationRejectionReason: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case distinctBusinessPurpose = "DistinctBusinessPurpose" + case distinctUserGroupHandling = "DistinctUserGroupHandling" + case other = "Other" + case separateDataConcern = "SeparateDataConcern" + public var description: String { return self.rawValue } + } + + public enum GroupingRecommendationStatusType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case accepted = "Accepted" + case pendingDecision = "PendingDecision" + case rejected = "Rejected" + public var description: String { return self.rawValue } + } + public enum HaArchitecture: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case backupAndRestore = "BackupAndRestore" case multiSite = "MultiSite" @@ -181,6 +206,7 @@ extension Resiliencehub { case breachedCanMeet = "BreachedCanMeet" case breachedUnattainable = "BreachedUnattainable" case metCanImprove = "MetCanImprove" + case missingPolicy = "MissingPolicy" public var description: String { return self.rawValue } } @@ -263,6 +289,14 @@ extension Resiliencehub { public var description: String { return self.rawValue } } + public enum ResourcesGroupingRecGenStatusType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failed = "Failed" + case inProgress = "InProgress" + case pending = "Pending" + case success = "Success" + public var description: String { return self.rawValue } + } + public enum SopServiceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case ssm = "SSM" public var description: String { return self.rawValue } @@ -291,6 +325,71 @@ extension Resiliencehub { // MARK: Shapes + public struct AcceptGroupingRecommendationEntry: AWSEncodableShape { + /// Indicates the identifier of the grouping recommendation. 
+ public let groupingRecommendationId: String + + public init(groupingRecommendationId: String) { + self.groupingRecommendationId = groupingRecommendationId + } + + public func validate(name: String) throws { + try self.validate(self.groupingRecommendationId, name: "groupingRecommendationId", parent: name, max: 255) + try self.validate(self.groupingRecommendationId, name: "groupingRecommendationId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case groupingRecommendationId = "groupingRecommendationId" + } + } + + public struct AcceptResourceGroupingRecommendationsRequest: AWSEncodableShape { + /// Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: + /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let appArn: String + /// Indicates the list of resource grouping recommendations you want to include in your application. 
+ public let entries: [AcceptGroupingRecommendationEntry] + + public init(appArn: String, entries: [AcceptGroupingRecommendationEntry]) { + self.appArn = appArn + self.entries = entries + } + + public func validate(name: String) throws { + try self.validate(self.appArn, name: "appArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+.-]{0,1023}$") + try self.entries.forEach { + try $0.validate(name: "\(name).entries[]") + } + try self.validate(self.entries, name: "entries", parent: name, max: 30) + try self.validate(self.entries, name: "entries", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + case entries = "entries" + } + } + + public struct AcceptResourceGroupingRecommendationsResponse: AWSDecodableShape { + /// Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: + /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let appArn: String + /// Indicates the list of resource grouping recommendations that could not be included in your application. + public let failedEntries: [FailedGroupingRecommendationEntry] + + public init(appArn: String, failedEntries: [FailedGroupingRecommendationEntry]) { + self.appArn = appArn + self.failedEntries = failedEntries + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + case failedEntries = "failedEntries" + } + } + public struct AddDraftAppVersionResourceMappingsRequest: AWSEncodableShape { /// Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: /// arn:partition:resiliencehub:region:account:app/app-id. 
For more information about ARNs, @@ -526,13 +625,15 @@ extension Resiliencehub { public let resourceErrorsDetails: ResourceErrorsDetails? /// Starting time for the action. public let startTime: Date? + /// Indicates a concise summary that provides an overview of the Resilience Hub assessment. + public let summary: AssessmentSummary? /// Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. /// Each tag consists of a key/value pair. public let tags: [String: String]? /// Version name of the published application. public let versionName: String? - public init(appArn: String? = nil, appVersion: String? = nil, assessmentArn: String, assessmentName: String? = nil, assessmentStatus: AssessmentStatus, compliance: [DisruptionType: DisruptionCompliance]? = nil, complianceStatus: ComplianceStatus? = nil, cost: Cost? = nil, driftStatus: DriftStatus? = nil, endTime: Date? = nil, invoker: AssessmentInvoker, message: String? = nil, policy: ResiliencyPolicy? = nil, resiliencyScore: ResiliencyScore? = nil, resourceErrorsDetails: ResourceErrorsDetails? = nil, startTime: Date? = nil, tags: [String: String]? = nil, versionName: String? = nil) { + public init(appArn: String? = nil, appVersion: String? = nil, assessmentArn: String, assessmentName: String? = nil, assessmentStatus: AssessmentStatus, compliance: [DisruptionType: DisruptionCompliance]? = nil, complianceStatus: ComplianceStatus? = nil, cost: Cost? = nil, driftStatus: DriftStatus? = nil, endTime: Date? = nil, invoker: AssessmentInvoker, message: String? = nil, policy: ResiliencyPolicy? = nil, resiliencyScore: ResiliencyScore? = nil, resourceErrorsDetails: ResourceErrorsDetails? = nil, startTime: Date? = nil, summary: AssessmentSummary? = nil, tags: [String: String]? = nil, versionName: String? 
= nil) { self.appArn = appArn self.appVersion = appVersion self.assessmentArn = assessmentArn @@ -549,6 +650,7 @@ extension Resiliencehub { self.resiliencyScore = resiliencyScore self.resourceErrorsDetails = resourceErrorsDetails self.startTime = startTime + self.summary = summary self.tags = tags self.versionName = versionName } @@ -570,6 +672,7 @@ extension Resiliencehub { case resiliencyScore = "resiliencyScore" case resourceErrorsDetails = "resourceErrorsDetails" case startTime = "startTime" + case summary = "summary" case tags = "tags" case versionName = "versionName" } @@ -590,7 +693,7 @@ extension Resiliencehub { public let assessmentName: String? /// Current status of the assessment for the resiliency policy. public let assessmentStatus: AssessmentStatus - /// TCurrent status of compliance for the resiliency policy. + /// Current status of compliance for the resiliency policy. public let complianceStatus: ComplianceStatus? /// Cost for an application. public let cost: Cost? @@ -820,6 +923,44 @@ extension Resiliencehub { } } + public struct AssessmentRiskRecommendation: AWSDecodableShape { + /// Indicates the Application Components (AppComponents) that were assessed as part of the assessment and are associated with the identified risk and recommendation. This property is available only in the US East (N. Virginia) Region. + public let appComponents: [String]? + /// Indicates the recommendation provided by the Resilience Hub to address the identified risks in the application. This property is available only in the US East (N. Virginia) Region. + public let recommendation: String? + /// Indicates the description of the potential risk identified in the application as part of the Resilience Hub assessment. This property is available only in the US East (N. Virginia) Region. + public let risk: String? + + public init(appComponents: [String]? = nil, recommendation: String? = nil, risk: String? 
= nil) { + self.appComponents = appComponents + self.recommendation = recommendation + self.risk = risk + } + + private enum CodingKeys: String, CodingKey { + case appComponents = "appComponents" + case recommendation = "recommendation" + case risk = "risk" + } + } + + public struct AssessmentSummary: AWSDecodableShape { + /// Indicates the top risks and recommendations identified by the Resilience Hub assessment, each representing a specific risk and the corresponding recommendation to address it. This property is available only in the US East (N. Virginia) Region. + public let riskRecommendations: [AssessmentRiskRecommendation]? + /// Indicates a concise summary that provides an overview of the Resilience Hub assessment. This property is available only in the US East (N. Virginia) Region. + public let summary: String? + + public init(riskRecommendations: [AssessmentRiskRecommendation]? = nil, summary: String? = nil) { + self.riskRecommendations = riskRecommendations + self.summary = summary + } + + private enum CodingKeys: String, CodingKey { + case riskRecommendations = "riskRecommendations" + case summary = "summary" + } + } + public struct BatchUpdateRecommendationStatusFailedEntry: AWSDecodableShape { /// An identifier of an entry in this batch that is used to communicate the result. The entryIds of a batch request need to be unique within a request. public let entryId: String @@ -2266,6 +2407,52 @@ extension Resiliencehub { } } + public struct DescribeResourceGroupingRecommendationTaskRequest: AWSEncodableShape { + /// Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: + /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let appArn: String + /// Indicates the identifier of the grouping recommendation task. + public let groupingId: String? 
+ + public init(appArn: String, groupingId: String? = nil) { + self.appArn = appArn + self.groupingId = groupingId + } + + public func validate(name: String) throws { + try self.validate(self.appArn, name: "appArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+.-]{0,1023}$") + try self.validate(self.groupingId, name: "groupingId", parent: name, max: 255) + try self.validate(self.groupingId, name: "groupingId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + case groupingId = "groupingId" + } + } + + public struct DescribeResourceGroupingRecommendationTaskResponse: AWSDecodableShape { + /// Indicates the error that occurred while generating a grouping recommendation. + public let errorMessage: String? + /// Indicates the identifier of the grouping recommendation task. + public let groupingId: String + /// Status of the action. + public let status: ResourcesGroupingRecGenStatusType + + public init(errorMessage: String? = nil, groupingId: String, status: ResourcesGroupingRecGenStatusType) { + self.errorMessage = errorMessage + self.groupingId = groupingId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case errorMessage = "errorMessage" + case groupingId = "groupingId" + case status = "status" + } + } + public struct DisruptionCompliance: AWSDecodableShape { /// The Recovery Point Objective (RPO) that is achievable, in seconds. public let achievableRpoInSecs: Int? @@ -2397,6 +2584,23 @@ extension Resiliencehub { } } + public struct FailedGroupingRecommendationEntry: AWSDecodableShape { + /// Indicates the error that occurred while implementing a grouping recommendation. + public let errorMessage: String + /// Indicates the identifier of the grouping recommendation. 
+ public let groupingRecommendationId: String + + public init(errorMessage: String, groupingRecommendationId: String) { + self.errorMessage = errorMessage + self.groupingRecommendationId = groupingRecommendationId + } + + private enum CodingKeys: String, CodingKey { + case errorMessage = "errorMessage" + case groupingRecommendationId = "groupingRecommendationId" + } + } + public struct FailurePolicy: AWSEncodableShape & AWSDecodableShape { /// Recovery Point Objective (RPO) in seconds. public let rpoInSecs: Int @@ -2419,6 +2623,101 @@ extension Resiliencehub { } } + public struct GroupingAppComponent: AWSDecodableShape { + /// Indicates the identifier of an AppComponent. + public let appComponentId: String + /// Indicates the name of an AppComponent. + public let appComponentName: String + /// Indicates the type of an AppComponent. + public let appComponentType: String + + public init(appComponentId: String, appComponentName: String, appComponentType: String) { + self.appComponentId = appComponentId + self.appComponentName = appComponentName + self.appComponentType = appComponentType + } + + private enum CodingKeys: String, CodingKey { + case appComponentId = "appComponentId" + case appComponentName = "appComponentName" + case appComponentType = "appComponentType" + } + } + + public struct GroupingRecommendation: AWSDecodableShape { + /// Indicates the confidence level of Resilience Hub on the grouping recommendation. + public let confidenceLevel: GroupingRecommendationConfidenceLevel + /// Indicates the creation time of the grouping recommendation. + public let creationTime: Date + /// Indicates the name of the recommended Application Component (AppComponent). + public let groupingAppComponent: GroupingAppComponent + /// Indicates the identifier of the grouping recommendation. + public let groupingRecommendationId: String + /// Indicates all the reasons available for rejecting a grouping recommendation. 
+ public let recommendationReasons: [String] + /// Indicates the reason you had selected while rejecting a grouping recommendation. + public let rejectionReason: GroupingRecommendationRejectionReason? + /// Indicates the resources that are grouped in a recommended AppComponent. + public let resources: [GroupingResource] + /// Indicates the confidence level of the grouping recommendation. + public let score: Double + /// Indicates the status of grouping resources into AppComponents. + public let status: GroupingRecommendationStatusType + + public init(confidenceLevel: GroupingRecommendationConfidenceLevel, creationTime: Date, groupingAppComponent: GroupingAppComponent, groupingRecommendationId: String, recommendationReasons: [String], rejectionReason: GroupingRecommendationRejectionReason? = nil, resources: [GroupingResource], score: Double, status: GroupingRecommendationStatusType) { + self.confidenceLevel = confidenceLevel + self.creationTime = creationTime + self.groupingAppComponent = groupingAppComponent + self.groupingRecommendationId = groupingRecommendationId + self.recommendationReasons = recommendationReasons + self.rejectionReason = rejectionReason + self.resources = resources + self.score = score + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case confidenceLevel = "confidenceLevel" + case creationTime = "creationTime" + case groupingAppComponent = "groupingAppComponent" + case groupingRecommendationId = "groupingRecommendationId" + case recommendationReasons = "recommendationReasons" + case rejectionReason = "rejectionReason" + case resources = "resources" + case score = "score" + case status = "status" + } + } + + public struct GroupingResource: AWSDecodableShape { + /// Indicates the logical identifier of the resource. + public let logicalResourceId: LogicalResourceId + /// Indicates the physical identifier of the resource. + public let physicalResourceId: PhysicalResourceId + /// Indicates the resource name. 
+ public let resourceName: String + /// Indicates the resource type. + public let resourceType: String + /// Indicates the identifier of the source AppComponents in which the resources were previously grouped into. + public let sourceAppComponentIds: [String] + + public init(logicalResourceId: LogicalResourceId, physicalResourceId: PhysicalResourceId, resourceName: String, resourceType: String, sourceAppComponentIds: [String]) { + self.logicalResourceId = logicalResourceId + self.physicalResourceId = physicalResourceId + self.resourceName = resourceName + self.resourceType = resourceType + self.sourceAppComponentIds = sourceAppComponentIds + } + + private enum CodingKeys: String, CodingKey { + case logicalResourceId = "logicalResourceId" + case physicalResourceId = "physicalResourceId" + case resourceName = "resourceName" + case resourceType = "resourceType" + case sourceAppComponentIds = "sourceAppComponentIds" + } + } + public struct ImportResourcesToDraftAppVersionRequest: AWSEncodableShape { /// Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, @@ -2551,9 +2850,9 @@ extension Resiliencehub { /// arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. public let assessmentArn: String - /// Indicates the maximum number of applications requested. + /// Indicates the maximum number of compliance drifts requested. public let maxResults: Int? - /// Indicates the unique token number of the next application to be checked for compliance and regulatory requirements from the list of applications. + /// Null, or the token from a previous call to get the next set of results. public let nextToken: String? public init(assessmentArn: String, maxResults: Int? = nil, nextToken: String? 
= nil) { @@ -2579,7 +2878,7 @@ extension Resiliencehub { public struct ListAppAssessmentComplianceDriftsResponse: AWSDecodableShape { /// Indicates compliance drifts (recovery time objective (RTO) and recovery point objective (RPO)) detected for an assessed entity. public let complianceDrifts: [ComplianceDrift] - /// Token number of the next application to be checked for compliance and regulatory requirements from the list of applications. + /// Null, or the token from a previous call to get the next set of results. public let nextToken: String? public init(complianceDrifts: [ComplianceDrift], nextToken: String? = nil) { @@ -3112,7 +3411,7 @@ extension Resiliencehub { public let name: String? /// Null, or the token from a previous call to get the next set of results. public let nextToken: String? - /// The application list is sorted based on the values of lastAppComplianceEvaluationTime field. By default, application list is sorted in ascending order. To sort the appliation list in descending order, set this field to True. + /// The application list is sorted based on the values of lastAppComplianceEvaluationTime field. By default, application list is sorted in ascending order. To sort the application list in descending order, set this field to True. public let reverseOrder: Bool? /// Indicates the upper limit of the range that is used to filter the applications based on their last assessment times. public let toLastAssessmentTime: Date? @@ -3290,6 +3589,57 @@ extension Resiliencehub { } } + public struct ListResourceGroupingRecommendationsRequest: AWSEncodableShape { + /// Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: + /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let appArn: String? 
+ /// Maximum number of grouping recommendations to be displayed per Resilience Hub application. + public let maxResults: Int? + /// Null, or the token from a previous call to get the next set of results. + public let nextToken: String? + + public init(appArn: String? = nil, maxResults: Int? = nil, nextToken: String? = nil) { + self.appArn = appArn + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.appArn, key: "appArn") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.appArn, name: "appArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+.-]{0,1023}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S{1,2000}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListResourceGroupingRecommendationsResponse: AWSDecodableShape { + /// List of resource grouping recommendations generated by Resilience Hub. + public let groupingRecommendations: [GroupingRecommendation] + /// Null, or the token from a previous call to get the next set of results. + public let nextToken: String? + + public init(groupingRecommendations: [GroupingRecommendation], nextToken: String? 
= nil) { + self.groupingRecommendations = groupingRecommendations + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case groupingRecommendations = "groupingRecommendations" + case nextToken = "nextToken" + } + } + public struct ListSopRecommendationsRequest: AWSEncodableShape { /// Amazon Resource Name (ARN) of the assessment. The format for this ARN is: /// arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, @@ -3903,6 +4253,75 @@ extension Resiliencehub { } } + public struct RejectGroupingRecommendationEntry: AWSEncodableShape { + /// Indicates the identifier of the grouping recommendation. + public let groupingRecommendationId: String + /// Indicates the reason you had selected while rejecting a grouping recommendation. + public let rejectionReason: GroupingRecommendationRejectionReason? + + public init(groupingRecommendationId: String, rejectionReason: GroupingRecommendationRejectionReason? = nil) { + self.groupingRecommendationId = groupingRecommendationId + self.rejectionReason = rejectionReason + } + + public func validate(name: String) throws { + try self.validate(self.groupingRecommendationId, name: "groupingRecommendationId", parent: name, max: 255) + try self.validate(self.groupingRecommendationId, name: "groupingRecommendationId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case groupingRecommendationId = "groupingRecommendationId" + case rejectionReason = "rejectionReason" + } + } + + public struct RejectResourceGroupingRecommendationsRequest: AWSEncodableShape { + /// Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: + /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. 
+ public let appArn: String + /// Indicates the list of resource grouping recommendations you have selected to exclude from your application. + public let entries: [RejectGroupingRecommendationEntry] + + public init(appArn: String, entries: [RejectGroupingRecommendationEntry]) { + self.appArn = appArn + self.entries = entries + } + + public func validate(name: String) throws { + try self.validate(self.appArn, name: "appArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+.-]{0,1023}$") + try self.entries.forEach { + try $0.validate(name: "\(name).entries[]") + } + try self.validate(self.entries, name: "entries", parent: name, max: 30) + try self.validate(self.entries, name: "entries", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + case entries = "entries" + } + } + + public struct RejectResourceGroupingRecommendationsResponse: AWSDecodableShape { + /// Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: + /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let appArn: String + /// Indicates the list of resource grouping recommendations that failed to get excluded in your application. + public let failedEntries: [FailedGroupingRecommendationEntry] + + public init(appArn: String, failedEntries: [FailedGroupingRecommendationEntry]) { + self.appArn = appArn + self.failedEntries = failedEntries + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + case failedEntries = "failedEntries" + } + } + public struct RemoveDraftAppVersionResourceMappingsRequest: AWSEncodableShape { /// Amazon Resource Name (ARN) of the Resilience Hub application. 
The format for this ARN is: /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, @@ -4397,6 +4816,52 @@ extension Resiliencehub { } } + public struct StartResourceGroupingRecommendationTaskRequest: AWSEncodableShape { + /// Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: + /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let appArn: String + + public init(appArn: String) { + self.appArn = appArn + } + + public func validate(name: String) throws { + try self.validate(self.appArn, name: "appArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+.-]{0,1023}$") + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + } + } + + public struct StartResourceGroupingRecommendationTaskResponse: AWSDecodableShape { + /// Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: + /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let appArn: String + /// Indicates the error that occurred while executing a grouping recommendation task. + public let errorMessage: String? + /// Indicates the identifier of the grouping recommendation task. + public let groupingId: String + /// Status of the action. + public let status: ResourcesGroupingRecGenStatusType + + public init(appArn: String, errorMessage: String? 
= nil, groupingId: String, status: ResourcesGroupingRecGenStatusType) { + self.appArn = appArn + self.errorMessage = errorMessage + self.groupingId = groupingId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + case errorMessage = "errorMessage" + case groupingId = "groupingId" + case status = "status" + } + } + public struct TagResourceRequest: AWSEncodableShape { /// Amazon Resource Name (ARN) of the resource. public let resourceArn: String diff --git a/Sources/Soto/Services/RolesAnywhere/RolesAnywhere_shapes.swift b/Sources/Soto/Services/RolesAnywhere/RolesAnywhere_shapes.swift index 800b35f14b..377134f64c 100644 --- a/Sources/Soto/Services/RolesAnywhere/RolesAnywhere_shapes.swift +++ b/Sources/Soto/Services/RolesAnywhere/RolesAnywhere_shapes.swift @@ -112,6 +112,8 @@ extension RolesAnywhere { } public struct CreateProfileRequest: AWSEncodableShape { + /// Used to determine if a custom role session name will be accepted in a temporary credential request. + public let acceptRoleSessionName: Bool? /// Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the /// CreateSession API documentation /// page for more details. In requests, if this value is not provided, the default value will be 3600. @@ -131,7 +133,8 @@ extension RolesAnywhere { /// The tags to attach to the profile. public let tags: [Tag]? - public init(durationSeconds: Int? = nil, enabled: Bool? = nil, managedPolicyArns: [String]? = nil, name: String, requireInstanceProperties: Bool? = nil, roleArns: [String], sessionPolicy: String? = nil, tags: [Tag]? = nil) { + public init(acceptRoleSessionName: Bool? = nil, durationSeconds: Int? = nil, enabled: Bool? = nil, managedPolicyArns: [String]? = nil, name: String, requireInstanceProperties: Bool? = nil, roleArns: [String], sessionPolicy: String? = nil, tags: [Tag]? 
= nil) { + self.acceptRoleSessionName = acceptRoleSessionName self.durationSeconds = durationSeconds self.enabled = enabled self.managedPolicyArns = managedPolicyArns @@ -160,6 +163,7 @@ extension RolesAnywhere { } private enum CodingKeys: String, CodingKey { + case acceptRoleSessionName = "acceptRoleSessionName" case durationSeconds = "durationSeconds" case enabled = "enabled" case managedPolicyArns = "managedPolicyArns" @@ -614,6 +618,8 @@ extension RolesAnywhere { } public struct ProfileDetail: AWSDecodableShape { + /// Used to determine if a custom role session name will be accepted in a temporary credential request. + public let acceptRoleSessionName: Bool? /// A mapping applied to the authenticating end-entity certificate. public let attributeMappings: [AttributeMapping]? /// The ISO-8601 timestamp when the profile was created. @@ -643,7 +649,8 @@ extension RolesAnywhere { /// The ISO-8601 timestamp when the profile was last updated. public let updatedAt: Date? - public init(attributeMappings: [AttributeMapping]? = nil, createdAt: Date? = nil, createdBy: String? = nil, durationSeconds: Int? = nil, enabled: Bool? = nil, managedPolicyArns: [String]? = nil, name: String? = nil, profileArn: String? = nil, profileId: String? = nil, requireInstanceProperties: Bool? = nil, roleArns: [String]? = nil, sessionPolicy: String? = nil, updatedAt: Date? = nil) { + public init(acceptRoleSessionName: Bool? = nil, attributeMappings: [AttributeMapping]? = nil, createdAt: Date? = nil, createdBy: String? = nil, durationSeconds: Int? = nil, enabled: Bool? = nil, managedPolicyArns: [String]? = nil, name: String? = nil, profileArn: String? = nil, profileId: String? = nil, requireInstanceProperties: Bool? = nil, roleArns: [String]? = nil, sessionPolicy: String? = nil, updatedAt: Date? 
= nil) { + self.acceptRoleSessionName = acceptRoleSessionName self.attributeMappings = attributeMappings self.createdAt = createdAt self.createdBy = createdBy @@ -660,6 +667,7 @@ extension RolesAnywhere { } private enum CodingKeys: String, CodingKey { + case acceptRoleSessionName = "acceptRoleSessionName" case attributeMappings = "attributeMappings" case createdAt = "createdAt" case createdBy = "createdBy" @@ -1191,6 +1199,8 @@ extension RolesAnywhere { } public struct UpdateProfileRequest: AWSEncodableShape { + /// Used to determine if a custom role session name will be accepted in a temporary credential request. + public let acceptRoleSessionName: Bool? /// Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the /// CreateSession API documentation /// page for more details. In requests, if this value is not provided, the default value will be 3600. @@ -1206,7 +1216,8 @@ extension RolesAnywhere { /// A session policy that applies to the trust boundary of the vended session credentials. public let sessionPolicy: String? - public init(durationSeconds: Int? = nil, managedPolicyArns: [String]? = nil, name: String? = nil, profileId: String, roleArns: [String]? = nil, sessionPolicy: String? = nil) { + public init(acceptRoleSessionName: Bool? = nil, durationSeconds: Int? = nil, managedPolicyArns: [String]? = nil, name: String? = nil, profileId: String, roleArns: [String]? = nil, sessionPolicy: String? = nil) { + self.acceptRoleSessionName = acceptRoleSessionName self.durationSeconds = durationSeconds self.managedPolicyArns = managedPolicyArns self.name = name @@ -1218,6 +1229,7 @@ extension RolesAnywhere { public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.acceptRoleSessionName, forKey: .acceptRoleSessionName) try container.encodeIfPresent(self.durationSeconds, forKey: .durationSeconds) try container.encodeIfPresent(self.managedPolicyArns, forKey: .managedPolicyArns) try container.encodeIfPresent(self.name, forKey: .name) @@ -1243,6 +1255,7 @@ extension RolesAnywhere { } private enum CodingKeys: String, CodingKey { + case acceptRoleSessionName = "acceptRoleSessionName" case durationSeconds = "durationSeconds" case managedPolicyArns = "managedPolicyArns" case name = "name" diff --git a/Sources/Soto/Services/S3/S3_api.swift b/Sources/Soto/Services/S3/S3_api.swift index 22c311b8b8..9e0395f77e 100644 --- a/Sources/Soto/Services/S3/S3_api.swift +++ b/Sources/Soto/Services/S3/S3_api.swift @@ -213,7 +213,7 @@ public struct S3: AWSService { // MARK: API Calls - /// This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts. To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. 
Permissions General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. + /// This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts. To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty. Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. 
These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to AbortMultipartUpload: CreateMultipartUpload UploadPart CompleteMultipartUpload ListParts ListMultipartUploads @Sendable public func abortMultipartUpload(_ input: AbortMultipartUploadRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AbortMultipartUploadOutput { @@ -241,7 +241,7 @@ public struct S3: AWSService { ) } - /// Creates a copy of an object that is already stored in Amazon S3. You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. 
However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API. You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide. Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration. Authentication and authorization All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication. Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions You must have read access to the source object and write access to the destination bucket. 
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation. If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied. If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket. Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. Response and special errors When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds. If the copy is successful, you receive a response with information about the copied object. A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. 
A 200 OK response can contain either a success or an error. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed. If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). Charge The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CopyObject: PutObject GetObject + /// Creates a copy of an object that is already stored in Amazon S3. You can store individual objects of up to 5 TB in Amazon S3. 
You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API. You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets. Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide. Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration. Authentication and authorization All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. 
For more information, see REST Authentication. Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions You must have read access to the source object and write access to the destination bucket. General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation. If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied. If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket. Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. 
Response and special errors When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds. If the copy is successful, you receive a response with information about the copied object. A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed. If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). Charge The copy request charge is based on the storage class and Region that you specify for the destination object. 
The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CopyObject: PutObject GetObject @Sendable public func copyObject(_ input: CopyObjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CopyObjectOutput { return try await self.client.execute( @@ -909,7 +909,7 @@ public struct S3: AWSService { ) } - /// You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission to access it. If the bucket does not exist or you do not have permission to access it, the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes. Directory buckets - You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Authentication and authorization All HeadBucket requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication. 
Directory bucket - You must use IAM credentials to authenticate and authorize your access to the HeadBucket API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions General purpose bucket permissions - To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Managing access permissions to your Amazon S3 resources in the Amazon S3 User Guide. Directory bucket permissions - You must have the s3express:CreateSession permission in the Action element of a policy. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the bucket. For more information about example bucket policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. + /// You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission to access it. If the bucket does not exist or you do not have permission to access it, the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes. Authentication and authorization General purpose buckets - Requests to public buckets that grant the s3:ListBucket permission publicly do not need to be signed. 
All other HeadBucket requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication. Directory buckets - You must use IAM credentials to authenticate and authorize your access to the HeadBucket API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions General purpose bucket permissions - To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Managing access permissions to your Amazon S3 resources in the Amazon S3 User Guide. Directory bucket permissions - You must have the s3express:CreateSession permission in the Action element of a policy. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the bucket. For more information about example bucket policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. 
@Sendable public func headBucket(_ input: HeadBucketRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> HeadBucketOutput { return try await self.client.execute( @@ -922,8 +922,8 @@ public struct S3: AWSService { ) } - /// The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not possible to retrieve the exact exception of these error codes. Request headers are limited to 8 KB in size. For more information, see Common Request Headers. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error. 
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a HEAD request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object. If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. 
The headers are: x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. Versioning If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response. If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header. Directory buckets - Delete marker is not supported by directory buckets. Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following actions are related to HeadObject: GetObject GetObjectAttributes + /// The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not possible to retrieve the exact exception of these error codes. Request headers are limited to 8 KB in size. For more information, see Common Request Headers. 
Permissions General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. 
If you include this header in a HEAD request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object. If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. Versioning If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response. If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header. Directory buckets - Delete marker is not supported by directory buckets. Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . 
Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. The following actions are related to HeadObject: GetObject GetObjectAttributes @Sendable public func headObject(_ input: HeadObjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> HeadObjectOutput { return try await self.client.execute( @@ -990,12 +990,13 @@ public struct S3: AWSService { /// This operation is not supported by directory buckets. Returns a list of all buckets owned by the authenticated sender of the request. To use this operation, you must have the s3:ListAllMyBuckets permission. For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets. @Sendable - public func listBuckets(logger: Logger = AWSClient.loggingDisabled) async throws -> ListBucketsOutput { + public func listBuckets(_ input: ListBucketsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListBucketsOutput { return try await self.client.execute( operation: "ListBuckets", path: "/?x-id=ListBuckets", httpMethod: .GET, serviceConfig: self.config, + input: input, logger: logger ) } @@ -1014,7 +1015,7 @@ public struct S3: AWSService { ) } - /// This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload request, but has not yet been completed or aborted. Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads request parameter. 
If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads request, the response returns an IsTruncated element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. In these requests, include two query parameters: key-marker and upload-id-marker. Set the value of key-marker to the NextKeyMarker value from the previous response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response. Directory buckets - The upload-id-marker element and the NextUploadIdMarker element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response. For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. 
With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. + /// This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload request, but has not yet been completed or aborted. Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads. The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads request, the response returns an IsTruncated element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. In these requests, include two query parameters: key-marker and upload-id-marker. Set the value of key-marker to the NextKeyMarker value from the previous response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response. Directory buckets - The upload-id-marker element and the NextUploadIdMarker element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response. 
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Sorting of multipart uploads in response General purpose bucket - In the ListMultipartUploads response, the multipart uploads are sorted based on two criteria: Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys. Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. 
Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later. Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to ListMultipartUploads: CreateMultipartUpload UploadPart CompleteMultipartUpload ListParts AbortMultipartUpload @Sendable public func listMultipartUploads(_ input: ListMultipartUploadsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListMultipartUploadsOutput { @@ -1054,7 +1055,7 @@ public struct S3: AWSService { ) } - /// Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. 
For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. + /// Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets. General purpose bucket - For general purpose buckets, ListObjectsV2 doesn't return prefixes that are related only to in-progress multipart uploads. Directory buckets - For directory buckets, ListObjectsV2 response includes the prefixes that are related only to in-progress multipart uploads. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. 
Permissions General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Sorting order of returned objects General purpose bucket - For general purpose buckets, ListObjectsV2 returns objects in lexicographical order based on their key names. Directory bucket - For directory buckets, ListObjectsV2 does not return objects in lexicographical order. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects. 
The following operations are related to ListObjectsV2: GetObject PutObject CreateBucket @Sendable public func listObjectsV2(_ input: ListObjectsV2Request, logger: Logger = AWSClient.loggingDisabled) async throws -> ListObjectsV2Output { @@ -1134,7 +1135,7 @@ public struct S3: AWSService { ) } - /// This operation is not supported by directory buckets. This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests. This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4). To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. The following operations are related to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption + /// This operation is not supported by directory buckets. This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket. 
By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests. If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner. Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4). To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. The following operations are related to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption @Sendable public func putBucketEncryption(_ input: PutBucketEncryptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -1291,7 +1292,7 @@ public struct S3: AWSService { ) } - /// This operation is not supported by directory buckets. Sets the versioning state of an existing bucket. 
You can set the versioning state with one of the following values: Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID. Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null. If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value. In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket. If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning. The following operations are related to PutBucketVersioning: CreateBucket DeleteBucket GetBucketVersioning + /// This operation is not supported by directory buckets. When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT or DELETE) on objects in the bucket. Sets the versioning state of an existing bucket. You can set the versioning state with one of the following values: Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID. 
Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null. If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value. In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket. If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning. The following operations are related to PutBucketVersioning: CreateBucket DeleteBucket GetBucketVersioning @Sendable public func putBucketVersioning(_ input: PutBucketVersioningRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -1490,6 +1491,25 @@ extension S3 { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension S3 { + /// This operation is not supported by directory buckets. Returns a list of all buckets owned by the authenticated sender of the request. To use this operation, you must have the s3:ListAllMyBuckets permission. For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listBucketsPaginator( + _ input: ListBucketsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listBuckets, + inputKey: \ListBucketsRequest.continuationToken, + outputKey: \ListBucketsOutput.continuationToken, + logger: logger + ) + } + /// Returns a list of all Amazon S3 directory buckets owned by the authenticated sender of the request. For more information about directory buckets, see Directory buckets in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. /// Return PaginatorSequence for operation. @@ -1510,7 +1530,7 @@ extension S3 { ) } - /// Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. 
Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. + /// Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. 
Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets. General purpose bucket - For general purpose buckets, ListObjectsV2 doesn't return prefixes that are related only to in-progress multipart uploads. Directory buckets - For directory buckets, ListObjectsV2 response includes the prefixes that are related only to in-progress multipart uploads. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. 
/// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Sorting order of returned objects General purpose bucket - For general purpose buckets, ListObjectsV2 returns objects in lexicographical order based on their key names. Directory bucket - For directory buckets, ListObjectsV2 does not return objects in lexicographical order. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects. The following operations are related to ListObjectsV2: GetObject PutObject CreateBucket /// Return PaginatorSequence for operation. /// @@ -1551,6 +1571,15 @@ extension S3 { } } +extension S3.ListBucketsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> S3.ListBucketsRequest { + return .init( + continuationToken: token, + maxBuckets: self.maxBuckets + ) + } +} + extension S3.ListDirectoryBucketsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> S3.ListDirectoryBucketsRequest { return .init( diff --git a/Sources/Soto/Services/S3/S3_shapes.swift b/Sources/Soto/Services/S3/S3_shapes.swift index 78bea02778..26d83db622 100644 --- a/Sources/Soto/Services/S3/S3_shapes.swift +++ b/Sources/Soto/Services/S3/S3_shapes.swift @@ -4438,13 +4438,13 @@ extension S3 { } public struct HeadBucketOutput: AWSDecodableShape { - /// Indicates whether the bucket name used in the request is an access point alias. This functionality is not supported for directory buckets. + /// Indicates whether the bucket name used in the request is an access point alias. 
For directory buckets, the value of this field is false. public let accessPointAlias: Bool? /// The name of the location where the bucket will be created. For directory buckets, the AZ ID of the Availability Zone where the bucket is created. An example AZ ID value is usw2-az1. This functionality is only supported by directory buckets. public let bucketLocationName: String? /// The type of location where the bucket is created. This functionality is only supported by directory buckets. public let bucketLocationType: LocationType? - /// The Region that the bucket is located. This functionality is not supported for directory buckets. + /// The Region that the bucket is located. public let bucketRegion: String? public init(accessPointAlias: Bool? = nil, bucketLocationName: String? = nil, bucketLocationType: LocationType? = nil, bucketRegion: String? = nil) { @@ -5362,20 +5362,50 @@ extension S3 { /// The list of buckets owned by the requester. @OptionalCustomCoding> public var buckets: [Bucket]? + /// ContinuationToken is included in the response when there are more buckets that can be listed with pagination. The next ListBuckets request to Amazon S3 can be continued with this ContinuationToken. ContinuationToken is obfuscated and is not a real bucket. + public let continuationToken: String? /// The owner of the buckets listed. public let owner: Owner? - public init(buckets: [Bucket]? = nil, owner: Owner? = nil) { + public init(buckets: [Bucket]? = nil, continuationToken: String? = nil, owner: Owner? = nil) { self.buckets = buckets + self.continuationToken = continuationToken self.owner = owner } private enum CodingKeys: String, CodingKey { case buckets = "Buckets" + case continuationToken = "ContinuationToken" case owner = "Owner" } } + public struct ListBucketsRequest: AWSEncodableShape { + /// ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key. 
You can use this ContinuationToken for pagination of the list results. Length Constraints: Minimum length of 0. Maximum length of 1024. Required: No. + public let continuationToken: String? + /// Maximum number of buckets to be returned in response. When the number is more than the count of buckets that are owned by an Amazon Web Services account, return all the buckets in response. + public let maxBuckets: Int? + + public init(continuationToken: String? = nil, maxBuckets: Int? = nil) { + self.continuationToken = continuationToken + self.maxBuckets = maxBuckets + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.continuationToken, key: "continuation-token") + request.encodeQuery(self.maxBuckets, key: "max-buckets") + } + + public func validate(name: String) throws { + try self.validate(self.maxBuckets, name: "maxBuckets", parent: name, max: 1000) + try self.validate(self.maxBuckets, name: "maxBuckets", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + public struct ListDirectoryBucketsOutput: AWSDecodableShape { public struct _BucketsEncoding: ArrayCoderProperties { public static let member = "Bucket" } @@ -5397,7 +5427,7 @@ extension S3 { } public struct ListDirectoryBucketsRequest: AWSEncodableShape { - /// ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key. You can use this ContinuationToken for pagination of the list results. + /// ContinuationToken indicates to Amazon S3 that the list is being continued on buckets in this account with a token. ContinuationToken is obfuscated and is not a real bucket name. You can use this ContinuationToken for the pagination of the list results. public let continuationToken: String? /// Maximum number of buckets to be returned in response. 
When the number is more than the count of buckets that are owned by an Amazon Web Services account, return all the buckets in response. public let maxDirectoryBuckets: Int? @@ -5687,7 +5717,7 @@ extension S3 { public let contents: [Object]? /// Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. Each rolled-up result counts as only one return against the MaxKeys value. public let delimiter: String? - /// Encoding type used by Amazon S3 to encode object keys in the response. If using url, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png will appear as test_file%283%29.png. + /// Encoding type used by Amazon S3 to encode the object keys in the response. Responses are encoded only in UTF-8. An object key can contain any Unicode character. However, the XML 1.0 parser can't parse certain characters, such as characters with an ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response. For more information about characters to avoid in object key names, see Object key naming guidelines. When using the URL encoding type, non-ASCII characters that are used in an object's key name will be percent-encoded according to UTF-8 code values. For example, the object test_file(3).png will appear as test_file%283%29.png. public let encodingType: EncodingType? /// A flag that indicates whether Amazon S3 returned all of the results that satisfied the search criteria. public let isTruncated: Bool? @@ -5879,7 +5909,7 @@ extension S3 { public let continuationToken: String? /// A delimiter is a character that you use to group keys. Directory buckets - For directory buckets, / is the only supported delimiter. 
Directory buckets - When you query ListObjectsV2 with a delimiter during in-progress multipart uploads, the CommonPrefixes response parameter contains the prefixes that are associated with the in-progress multipart uploads. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide. public let delimiter: String? - /// Encoding type used by Amazon S3 to encode object keys in the response. If using url, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png will appear as test_file%283%29.png. + /// Encoding type used by Amazon S3 to encode the object keys in the response. Responses are encoded only in UTF-8. An object key can contain any Unicode character. However, the XML 1.0 parser can't parse certain characters, such as characters with an ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response. For more information about characters to avoid in object key names, see Object key naming guidelines. When using the URL encoding type, non-ASCII characters that are used in an object's key name will be percent-encoded according to UTF-8 code values. For example, the object test_file(3).png will appear as test_file%283%29.png. public let encodingType: EncodingType? /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). public let expectedBucketOwner: String? @@ -6637,7 +6667,7 @@ extension S3 { } public struct PartitionedPrefix: AWSEncodableShape & AWSDecodableShape { - /// Specifies the partition date source for the partitioned prefix. PartitionDateSource can be EventTime or DeliveryTime. + /// Specifies the partition date source for the partitioned prefix. PartitionDateSource can be EventTime or DeliveryTime. 
For DeliveryTime, the time in the log file names corresponds to the delivery time for the log files. For EventTime, The logs delivered are for a specific day only. The year, month, and day correspond to the day on which the event occurred, and the hour, minutes and seconds are set to 00 in the key. public let partitionDateSource: PartitionDateSource? public init(partitionDateSource: PartitionDateSource? = nil) { @@ -6706,7 +6736,7 @@ extension S3 { public let blockPublicPolicy: Bool? /// Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket. Enabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set. public let ignorePublicAcls: Bool? - /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Service principals and authorized users within this account if the bucket has a public policy. Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. + /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Servicesservice principals and authorized users within this account if the bucket has a public policy. Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. public let restrictPublicBuckets: Bool? public init(blockPublicAcls: Bool? = nil, blockPublicPolicy: Bool? = nil, ignorePublicAcls: Bool? 
= nil, restrictPublicBuckets: Bool? = nil) { @@ -8016,7 +8046,7 @@ extension S3 { } public struct RecordsEvent: AWSDecodableShape { - /// The byte array of partial, one or more result records. + /// The byte array of partial, one or more result records. S3 Select doesn't guarantee that a record will be self-contained in one record frame. To ensure continuous streaming of data, S3 Select might split the same record across multiple record frames instead of aggregating the results in memory. Some S3 clients (for example, the SDK for Java) handle this behavior by creating a ByteStream out of the response by default. Other clients might not handle this behavior by default. In those cases, you must aggregate the results on the client side and parse the response. public let payload: AWSEventPayload public init(payload: AWSEventPayload) { diff --git a/Sources/Soto/Services/SESv2/SESv2_shapes.swift b/Sources/Soto/Services/SESv2/SESv2_shapes.swift index 61fdaa770c..74145fed86 100644 --- a/Sources/Soto/Services/SESv2/SESv2_shapes.swift +++ b/Sources/Soto/Services/SESv2/SESv2_shapes.swift @@ -4174,11 +4174,11 @@ extension SESv2 { /// Indicates whether or not your account should have production access in the current Amazon Web Services Region. If the value is false, then your account is in the sandbox. When your account is in the sandbox, you can only send email to verified identities. If the value is true, then your account has production access. When your account has production access, you can send email to any address. The sending quota and maximum sending rate for your account vary based on your specific use case. public let productionAccessEnabled: Bool? /// A description of the types of email that you plan to send. - public let useCaseDescription: String + public let useCaseDescription: String? /// The URL of your website. This information helps us better understand the type of content that you plan to send. 
public let websiteURL: String - public init(additionalContactEmailAddresses: [String]? = nil, contactLanguage: ContactLanguage? = nil, mailType: MailType, productionAccessEnabled: Bool? = nil, useCaseDescription: String, websiteURL: String) { + public init(additionalContactEmailAddresses: [String]? = nil, contactLanguage: ContactLanguage? = nil, mailType: MailType, productionAccessEnabled: Bool? = nil, useCaseDescription: String? = nil, websiteURL: String) { self.additionalContactEmailAddresses = additionalContactEmailAddresses self.contactLanguage = contactLanguage self.mailType = mailType @@ -4196,7 +4196,6 @@ extension SESv2 { try self.validate(self.additionalContactEmailAddresses, name: "additionalContactEmailAddresses", parent: name, max: 4) try self.validate(self.additionalContactEmailAddresses, name: "additionalContactEmailAddresses", parent: name, min: 1) try self.validate(self.useCaseDescription, name: "useCaseDescription", parent: name, max: 5000) - try self.validate(self.useCaseDescription, name: "useCaseDescription", parent: name, min: 1) try self.validate(self.websiteURL, name: "websiteURL", parent: name, max: 1000) try self.validate(self.websiteURL, name: "websiteURL", parent: name, min: 1) try self.validate(self.websiteURL, name: "websiteURL", parent: name, pattern: "^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\\?([^#]*))?(#(.*))?$") diff --git a/Sources/Soto/Services/SFN/SFN_api.swift b/Sources/Soto/Services/SFN/SFN_api.swift index 703206a537..0cd07c32ff 100644 --- a/Sources/Soto/Services/SFN/SFN_api.swift +++ b/Sources/Soto/Services/SFN/SFN_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS SFN service. /// -/// Step Functions Step Functions is a service that lets you coordinate the components of distributed applications and microservices using visual workflows. 
You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues. Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has access to Amazon Web Services. You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API. For more information about Step Functions, see the Step Functions Developer Guide . If you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution and specify its parameter as StateMachineArn. +/// Step Functions Step Functions coordinates the components of distributed applications and microservices using visual workflows. You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues. 
Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has access to Amazon Web Services. You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API. For more information about Step Functions, see the Step Functions Developer Guide . If you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution and specify its parameter as StateMachineArn. public struct SFN: AWSService { // MARK: Member variables @@ -104,7 +104,7 @@ public struct SFN: AWSService { ) } - /// Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language. For more information, see Amazon States Language in the Step Functions User Guide. If you set the publish parameter of this API action to true, it publishes version 1 as the first revision of the state machine. This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes. CreateStateMachine is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateStateMachine's idempotency check is based on the state machine name, definition, type, LoggingConfiguration, and TracingConfiguration. The check is also based on the publish and versionDescription parameters. If a following request has a different roleArn or tags, Step Functions will ignore these differences and treat it as an idempotent request of the previous. 
In this case, roleArn and tags will not be updated, even if they are different. + /// Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language. For more information, see Amazon States Language in the Step Functions User Guide. If you set the publish parameter of this API action to true, it publishes version 1 as the first revision of the state machine. For additional control over security, you can encrypt your data using a customer-managed key for Step Functions state machines. You can configure a symmetric KMS key and data key reuse period when creating or updating a State Machine. The execution history and state machine definition will be encrypted with the key applied to the State Machine. This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes. CreateStateMachine is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateStateMachine's idempotency check is based on the state machine name, definition, type, LoggingConfiguration, TracingConfiguration, and EncryptionConfiguration The check is also based on the publish and versionDescription parameters. If a following request has a different roleArn or tags, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, roleArn and tags will not be updated, even if they are different. 
@Sendable public func createStateMachine(_ input: CreateStateMachineInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateStateMachineOutput { return try await self.client.execute( @@ -403,7 +403,7 @@ public struct SFN: AWSService { ) } - /// Used by activity workers, Task states using the callback pattern, and optionally Task states using the job run pattern to report that the task identified by the taskToken failed. + /// Used by activity workers, Task states using the callback pattern, and optionally Task states using the job run pattern to report that the task identified by the taskToken failed. For an execution with encryption enabled, Step Functions will encrypt the error and cause fields using the KMS key for the execution role. A caller can mark a task as fail without using any KMS permissions in the execution role if the caller provides a null value for both error and cause fields because no data needs to be encrypted. @Sendable public func sendTaskFailure(_ input: SendTaskFailureInput, logger: Logger = AWSClient.loggingDisabled) async throws -> SendTaskFailureOutput { return try await self.client.execute( @@ -470,7 +470,7 @@ public struct SFN: AWSService { ) } - /// Stops an execution. This API action is not supported by EXPRESS state machines. + /// Stops an execution. This API action is not supported by EXPRESS state machines. For an execution with encryption enabled, Step Functions will encrypt the error and cause fields using the KMS key for the execution role. A caller can stop an execution without using any KMS permissions in the execution role if the caller provides a null value for both error and cause fields because no data needs to be encrypted. 
@Sendable public func stopExecution(_ input: StopExecutionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StopExecutionOutput { return try await self.client.execute( @@ -536,7 +536,7 @@ public struct SFN: AWSService { ) } - /// Updates an existing state machine by modifying its definition, roleArn, or loggingConfiguration. Running executions will continue to use the previous definition and roleArn. You must include at least one of definition or roleArn or you will receive a MissingRequiredParameter error. A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName. A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN. The following are some examples of qualified and unqualified state machine ARNs: The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine. arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException. The following qualified state machine ARN refers to an alias named PROD. arn::states:::stateMachine: If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias. The following unqualified state machine ARN refers to a state machine named myStateMachine. arn::states:::stateMachine: After you update your state machine, you can set the publish parameter to true in the same action to publish a new version. This way, you can opt-in to strict versioning of your state machine. 
Step Functions assigns monotonically increasing integers for state machine versions, starting at version number 1. All StartExecution calls within a few seconds use the updated definition and roleArn. Executions started immediately after you call UpdateStateMachine may use the previous state machine definition and roleArn. + /// Updates an existing state machine by modifying its definition, roleArn, loggingConfiguration, or EncryptionConfiguration. Running executions will continue to use the previous definition and roleArn. You must include at least one of definition or roleArn or you will receive a MissingRequiredParameter error. A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName. A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN. The following are some examples of qualified and unqualified state machine ARNs: The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine. arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException. The following qualified state machine ARN refers to an alias named PROD. arn::states:::stateMachine: If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias. The following unqualified state machine ARN refers to a state machine named myStateMachine. 
arn::states:::stateMachine: After you update your state machine, you can set the publish parameter to true in the same action to publish a new version. This way, you can opt-in to strict versioning of your state machine. Step Functions assigns monotonically increasing integers for state machine versions, starting at version number 1. All StartExecution calls within a few seconds use the updated definition and roleArn. Executions started immediately after you call UpdateStateMachine may use the previous state machine definition and roleArn. @Sendable public func updateStateMachine(_ input: UpdateStateMachineInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateStateMachineOutput { return try await self.client.execute( diff --git a/Sources/Soto/Services/SFN/SFN_shapes.swift b/Sources/Soto/Services/SFN/SFN_shapes.swift index a1821b9182..699f7b08b0 100644 --- a/Sources/Soto/Services/SFN/SFN_shapes.swift +++ b/Sources/Soto/Services/SFN/SFN_shapes.swift @@ -26,6 +26,12 @@ import Foundation extension SFN { // MARK: Enums + public enum EncryptionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case awsOwnedKey = "AWS_OWNED_KEY" + case customerManagedKmsKey = "CUSTOMER_MANAGED_KMS_KEY" + public var description: String { return self.rawValue } + } + public enum ExecutionRedriveFilter: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case notRedriven = "NOT_REDRIVEN" case redriven = "REDRIVEN" @@ -114,6 +120,12 @@ extension SFN { public var description: String { return self.rawValue } } + public enum IncludedData: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case allData = "ALL_DATA" + case metadataOnly = "METADATA_ONLY" + public var description: String { return self.rawValue } + } + public enum InspectionLevel: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case debug = "DEBUG" case info = "INFO" @@ -357,17 +369,21 @@ extension 
SFN { } public struct CreateActivityInput: AWSEncodableShape { + /// Settings to configure server-side encryption. + public let encryptionConfiguration: EncryptionConfiguration? /// The name of the activity to create. This name must be unique for your Amazon Web Services account and region for 90 days. For more information, see Limits Related to State Machine Executions in the Step Functions Developer Guide. A name must not contain: white space brackets { } [ ] wildcard characters ? * special characters " # % \ ^ | ~ ` $ & , ; : / control characters (U+0000-001F, U+007F-009F) To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _. public let name: String /// The list of tags to add to a resource. An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide, and Controlling Access Using IAM Tags. Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @. public let tags: [Tag]? - public init(name: String, tags: [Tag]? = nil) { + public init(encryptionConfiguration: EncryptionConfiguration? = nil, name: String, tags: [Tag]? = nil) { + self.encryptionConfiguration = encryptionConfiguration self.name = name self.tags = tags } public func validate(name: String) throws { + try self.encryptionConfiguration?.validate(name: "\(name).encryptionConfiguration") try self.validate(self.name, name: "name", parent: name, max: 80) try self.validate(self.name, name: "name", parent: name, min: 1) try self.tags?.forEach { @@ -376,6 +392,7 @@ extension SFN { } private enum CodingKeys: String, CodingKey { + case encryptionConfiguration = "encryptionConfiguration" case name = "name" case tags = "tags" } @@ -451,6 +468,8 @@ extension SFN { public struct CreateStateMachineInput: AWSEncodableShape { /// The Amazon States Language definition of the state machine. See Amazon States Language. 
public let definition: String + /// Settings to configure server-side encryption. + public let encryptionConfiguration: EncryptionConfiguration? /// Defines what execution history events are logged and where they are logged. By default, the level is set to OFF. For more information see Log Levels in the Step Functions User Guide. public let loggingConfiguration: LoggingConfiguration? /// The name of the state machine. A name must not contain: white space brackets { } [ ] wildcard characters ? * special characters " # % \ ^ | ~ ` $ & , ; : / control characters (U+0000-001F, U+007F-009F) To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _. @@ -468,8 +487,9 @@ extension SFN { /// Sets description about the state machine version. You can only set the description if the publish parameter is set to true. Otherwise, if you set versionDescription, but publish to false, this API action throws ValidationException. public let versionDescription: String? - public init(definition: String, loggingConfiguration: LoggingConfiguration? = nil, name: String, publish: Bool? = nil, roleArn: String, tags: [Tag]? = nil, tracingConfiguration: TracingConfiguration? = nil, type: StateMachineType? = nil, versionDescription: String? = nil) { + public init(definition: String, encryptionConfiguration: EncryptionConfiguration? = nil, loggingConfiguration: LoggingConfiguration? = nil, name: String, publish: Bool? = nil, roleArn: String, tags: [Tag]? = nil, tracingConfiguration: TracingConfiguration? = nil, type: StateMachineType? = nil, versionDescription: String? 
= nil) { self.definition = definition + self.encryptionConfiguration = encryptionConfiguration self.loggingConfiguration = loggingConfiguration self.name = name self.publish = publish @@ -483,6 +503,7 @@ extension SFN { public func validate(name: String) throws { try self.validate(self.definition, name: "definition", parent: name, max: 1048576) try self.validate(self.definition, name: "definition", parent: name, min: 1) + try self.encryptionConfiguration?.validate(name: "\(name).encryptionConfiguration") try self.loggingConfiguration?.validate(name: "\(name).loggingConfiguration") try self.validate(self.name, name: "name", parent: name, max: 80) try self.validate(self.name, name: "name", parent: name, min: 1) @@ -496,6 +517,7 @@ extension SFN { private enum CodingKeys: String, CodingKey { case definition = "definition" + case encryptionConfiguration = "encryptionConfiguration" case loggingConfiguration = "loggingConfiguration" case name = "name" case publish = "publish" @@ -639,18 +661,22 @@ extension SFN { public let activityArn: String /// The date the activity is created. public let creationDate: Date + /// Settings for configured server-side encryption. + public let encryptionConfiguration: EncryptionConfiguration? /// The name of the activity. A name must not contain: white space brackets { } [ ] wildcard characters ? * special characters " # % \ ^ | ~ ` $ & , ; : / control characters (U+0000-001F, U+007F-009F) To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _. public let name: String - public init(activityArn: String, creationDate: Date, name: String) { + public init(activityArn: String, creationDate: Date, encryptionConfiguration: EncryptionConfiguration? 
= nil, name: String) { self.activityArn = activityArn self.creationDate = creationDate + self.encryptionConfiguration = encryptionConfiguration self.name = name } private enum CodingKeys: String, CodingKey { case activityArn = "activityArn" case creationDate = "creationDate" + case encryptionConfiguration = "encryptionConfiguration" case name = "name" } } @@ -658,9 +684,12 @@ extension SFN { public struct DescribeExecutionInput: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the execution to describe. public let executionArn: String + /// If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call DescribeStateMachine API with includedData = METADATA_ONLY to get a successful response without the encrypted definition. + public let includedData: IncludedData? - public init(executionArn: String) { + public init(executionArn: String, includedData: IncludedData? = nil) { self.executionArn = executionArn + self.includedData = includedData } public func validate(name: String) throws { @@ -670,6 +699,7 @@ extension SFN { private enum CodingKeys: String, CodingKey { case executionArn = "executionArn" + case includedData = "includedData" } } @@ -889,9 +919,12 @@ extension SFN { public struct DescribeStateMachineForExecutionInput: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the execution you want state machine information for. public let executionArn: String + /// If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition. + public let includedData: IncludedData? - public init(executionArn: String) { + public init(executionArn: String, includedData: IncludedData? 
= nil) { self.executionArn = executionArn + self.includedData = includedData } public func validate(name: String) throws { @@ -901,12 +934,15 @@ extension SFN { private enum CodingKeys: String, CodingKey { case executionArn = "executionArn" + case includedData = "includedData" } } public struct DescribeStateMachineForExecutionOutput: AWSDecodableShape { /// The Amazon States Language definition of the state machine. See Amazon States Language. public let definition: String + /// Settings to configure server-side encryption. + public let encryptionConfiguration: EncryptionConfiguration? /// A user-defined or an auto-generated string that identifies a Map state. This field is returned only if the executionArn is a child workflow execution that was started by a Distributed Map state. public let label: String? public let loggingConfiguration: LoggingConfiguration? @@ -925,8 +961,9 @@ extension SFN { /// The date and time the state machine associated with an execution was updated. For a newly created state machine, this is the creation date. public let updateDate: Date - public init(definition: String, label: String? = nil, loggingConfiguration: LoggingConfiguration? = nil, mapRunArn: String? = nil, name: String, revisionId: String? = nil, roleArn: String, stateMachineArn: String, tracingConfiguration: TracingConfiguration? = nil, updateDate: Date) { + public init(definition: String, encryptionConfiguration: EncryptionConfiguration? = nil, label: String? = nil, loggingConfiguration: LoggingConfiguration? = nil, mapRunArn: String? = nil, name: String, revisionId: String? = nil, roleArn: String, stateMachineArn: String, tracingConfiguration: TracingConfiguration? 
= nil, updateDate: Date) { self.definition = definition + self.encryptionConfiguration = encryptionConfiguration self.label = label self.loggingConfiguration = loggingConfiguration self.mapRunArn = mapRunArn @@ -940,6 +977,7 @@ extension SFN { private enum CodingKeys: String, CodingKey { case definition = "definition" + case encryptionConfiguration = "encryptionConfiguration" case label = "label" case loggingConfiguration = "loggingConfiguration" case mapRunArn = "mapRunArn" @@ -953,10 +991,13 @@ extension SFN { } public struct DescribeStateMachineInput: AWSEncodableShape { + /// If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition. When calling a labelled ARN for an encrypted state machine, the includedData = METADATA_ONLY parameter will not apply because Step Functions needs to decrypt the entire state machine definition to get the Distributed Map state’s definition. In this case, the API caller needs to have kms:Decrypt permission. + public let includedData: IncludedData? /// The Amazon Resource Name (ARN) of the state machine for which you want the information. If you specify a state machine version ARN, this API returns details about that version. The version ARN is a combination of state machine ARN and the version number separated by a colon (:). For example, stateMachineARN:1. public let stateMachineArn: String - public init(stateMachineArn: String) { + public init(includedData: IncludedData? 
= nil, stateMachineArn: String) { + self.includedData = includedData self.stateMachineArn = stateMachineArn } @@ -966,6 +1007,7 @@ extension SFN { } private enum CodingKeys: String, CodingKey { + case includedData = "includedData" case stateMachineArn = "stateMachineArn" } } @@ -973,10 +1015,12 @@ extension SFN { public struct DescribeStateMachineOutput: AWSDecodableShape { /// The date the state machine is created. For a state machine version, creationDate is the date the version was created. public let creationDate: Date - /// The Amazon States Language definition of the state machine. See Amazon States Language. + /// The Amazon States Language definition of the state machine. See Amazon States Language. If called with includedData = METADATA_ONLY, the returned definition will be {}. public let definition: String /// The description of the state machine version. public let description: String? + /// Settings to configure server-side encryption. + public let encryptionConfiguration: EncryptionConfiguration? /// A user-defined or an auto-generated string that identifies a Map state. This parameter is present only if the stateMachineArn specified in input is a qualified state machine ARN. public let label: String? public let loggingConfiguration: LoggingConfiguration? @@ -995,10 +1039,11 @@ extension SFN { /// The type of the state machine (STANDARD or EXPRESS). public let type: StateMachineType - public init(creationDate: Date, definition: String, description: String? = nil, label: String? = nil, loggingConfiguration: LoggingConfiguration? = nil, name: String, revisionId: String? = nil, roleArn: String, stateMachineArn: String, status: StateMachineStatus? = nil, tracingConfiguration: TracingConfiguration? = nil, type: StateMachineType) { + public init(creationDate: Date, definition: String, description: String? = nil, encryptionConfiguration: EncryptionConfiguration? = nil, label: String? = nil, loggingConfiguration: LoggingConfiguration? 
= nil, name: String, revisionId: String? = nil, roleArn: String, stateMachineArn: String, status: StateMachineStatus? = nil, tracingConfiguration: TracingConfiguration? = nil, type: StateMachineType) { self.creationDate = creationDate self.definition = definition self.description = description + self.encryptionConfiguration = encryptionConfiguration self.label = label self.loggingConfiguration = loggingConfiguration self.name = name @@ -1014,6 +1059,7 @@ extension SFN { case creationDate = "creationDate" case definition = "definition" case description = "description" + case encryptionConfiguration = "encryptionConfiguration" case label = "label" case loggingConfiguration = "loggingConfiguration" case name = "name" @@ -1026,6 +1072,34 @@ extension SFN { } } + public struct EncryptionConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Maximum duration that Step Functions will reuse data keys. When the period expires, Step Functions will call GenerateDataKey. Only applies to customer managed keys. + public let kmsDataKeyReusePeriodSeconds: Int? + /// An alias, alias ARN, key ID, or key ARN of a symmetric encryption KMS key to encrypt data. To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN. + public let kmsKeyId: String? + /// Encryption type + public let type: EncryptionType + + public init(kmsDataKeyReusePeriodSeconds: Int? = nil, kmsKeyId: String? 
= nil, type: EncryptionType) { + self.kmsDataKeyReusePeriodSeconds = kmsDataKeyReusePeriodSeconds + self.kmsKeyId = kmsKeyId + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.kmsDataKeyReusePeriodSeconds, name: "kmsDataKeyReusePeriodSeconds", parent: name, max: 900) + try self.validate(self.kmsDataKeyReusePeriodSeconds, name: "kmsDataKeyReusePeriodSeconds", parent: name, min: 60) + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2048) + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case kmsDataKeyReusePeriodSeconds = "kmsDataKeyReusePeriodSeconds" + case kmsKeyId = "kmsKeyId" + case type = "type" + } + } + public struct ExecutionAbortedEventDetails: AWSDecodableShape { /// A more detailed explanation of the cause of the failure. public let cause: String? @@ -2353,7 +2427,7 @@ extension SFN { public func validate(name: String) throws { try self.validate(self.cause, name: "cause", parent: name, max: 32768) try self.validate(self.error, name: "error", parent: name, max: 256) - try self.validate(self.taskToken, name: "taskToken", parent: name, max: 1024) + try self.validate(self.taskToken, name: "taskToken", parent: name, max: 2048) try self.validate(self.taskToken, name: "taskToken", parent: name, min: 1) } @@ -2377,7 +2451,7 @@ extension SFN { } public func validate(name: String) throws { - try self.validate(self.taskToken, name: "taskToken", parent: name, max: 1024) + try self.validate(self.taskToken, name: "taskToken", parent: name, max: 2048) try self.validate(self.taskToken, name: "taskToken", parent: name, min: 1) } @@ -2403,7 +2477,7 @@ extension SFN { public func validate(name: String) throws { try self.validate(self.output, name: "output", parent: name, max: 262144) - try self.validate(self.taskToken, name: "taskToken", parent: name, max: 1024) + try self.validate(self.taskToken, name: "taskToken", parent: name, 
max: 2048) try self.validate(self.taskToken, name: "taskToken", parent: name, min: 1) } @@ -2470,6 +2544,8 @@ extension SFN { } public struct StartSyncExecutionInput: AWSEncodableShape { + /// If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition. + public let includedData: IncludedData? /// The string that contains the JSON input data for the execution, for example: "input": "{\"first_name\" : \"test\"}" If you don't include any JSON input data, you still must include the two braces, for example: "input": "{}" Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding. public let input: String? /// The name of the execution. @@ -2479,7 +2555,8 @@ extension SFN { /// Passes the X-Ray trace header. The trace header can also be passed in the request payload. public let traceHeader: String? - public init(input: String? = nil, name: String? = nil, stateMachineArn: String, traceHeader: String? = nil) { + public init(includedData: IncludedData? = nil, input: String? = nil, name: String? = nil, stateMachineArn: String, traceHeader: String? = nil) { + self.includedData = includedData self.input = input self.name = name self.stateMachineArn = stateMachineArn @@ -2497,6 +2574,7 @@ extension SFN { } private enum CodingKeys: String, CodingKey { + case includedData = "includedData" case input = "input" case name = "name" case stateMachineArn = "stateMachineArn" @@ -3176,6 +3254,8 @@ extension SFN { public struct UpdateStateMachineInput: AWSEncodableShape { /// The Amazon States Language definition of the state machine. See Amazon States Language. public let definition: String? + /// Settings to configure server-side encryption. + public let encryptionConfiguration: EncryptionConfiguration? 
/// Use the LoggingConfiguration data type to set CloudWatch Logs options. public let loggingConfiguration: LoggingConfiguration? /// Specifies whether the state machine version is published. The default is false. To publish a version after updating the state machine, set publish to true. @@ -3189,8 +3269,9 @@ extension SFN { /// An optional description of the state machine version to publish. You can only specify the versionDescription parameter if you've set publish to true. public let versionDescription: String? - public init(definition: String? = nil, loggingConfiguration: LoggingConfiguration? = nil, publish: Bool? = nil, roleArn: String? = nil, stateMachineArn: String, tracingConfiguration: TracingConfiguration? = nil, versionDescription: String? = nil) { + public init(definition: String? = nil, encryptionConfiguration: EncryptionConfiguration? = nil, loggingConfiguration: LoggingConfiguration? = nil, publish: Bool? = nil, roleArn: String? = nil, stateMachineArn: String, tracingConfiguration: TracingConfiguration? = nil, versionDescription: String? 
= nil) { self.definition = definition + self.encryptionConfiguration = encryptionConfiguration self.loggingConfiguration = loggingConfiguration self.publish = publish self.roleArn = roleArn @@ -3202,6 +3283,7 @@ extension SFN { public func validate(name: String) throws { try self.validate(self.definition, name: "definition", parent: name, max: 1048576) try self.validate(self.definition, name: "definition", parent: name, min: 1) + try self.encryptionConfiguration?.validate(name: "\(name).encryptionConfiguration") try self.loggingConfiguration?.validate(name: "\(name).loggingConfiguration") try self.validate(self.roleArn, name: "roleArn", parent: name, max: 256) try self.validate(self.roleArn, name: "roleArn", parent: name, min: 1) @@ -3212,6 +3294,7 @@ extension SFN { private enum CodingKeys: String, CodingKey { case definition = "definition" + case encryptionConfiguration = "encryptionConfiguration" case loggingConfiguration = "loggingConfiguration" case publish = "publish" case roleArn = "roleArn" @@ -3312,6 +3395,7 @@ extension SFN { /// Error enum for SFN public struct SFNErrorType: AWSErrorType { enum Code: String { + case activityAlreadyExists = "ActivityAlreadyExists" case activityDoesNotExist = "ActivityDoesNotExist" case activityLimitExceeded = "ActivityLimitExceeded" case activityWorkerLimitExceeded = "ActivityWorkerLimitExceeded" @@ -3322,12 +3406,16 @@ public struct SFNErrorType: AWSErrorType { case executionNotRedrivable = "ExecutionNotRedrivable" case invalidArn = "InvalidArn" case invalidDefinition = "InvalidDefinition" + case invalidEncryptionConfiguration = "InvalidEncryptionConfiguration" case invalidExecutionInput = "InvalidExecutionInput" case invalidLoggingConfiguration = "InvalidLoggingConfiguration" case invalidName = "InvalidName" case invalidOutput = "InvalidOutput" case invalidToken = "InvalidToken" case invalidTracingConfiguration = "InvalidTracingConfiguration" + case kmsAccessDeniedException = "KmsAccessDeniedException" + case 
kmsInvalidStateException = "KmsInvalidStateException" + case kmsThrottlingException = "KmsThrottlingException" case missingRequiredParameter = "MissingRequiredParameter" case resourceNotFound = "ResourceNotFound" case serviceQuotaExceededException = "ServiceQuotaExceededException" @@ -3360,6 +3448,8 @@ public struct SFNErrorType: AWSErrorType { /// return error code string public var errorCode: String { self.error.rawValue } + /// Activity already exists. EncryptionConfiguration may not be updated. + public static var activityAlreadyExists: Self { .init(.activityAlreadyExists) } /// The specified activity does not exist. public static var activityDoesNotExist: Self { .init(.activityDoesNotExist) } /// The maximum number of activities has been reached. Existing activities must be deleted before a new activity can be created. @@ -3380,8 +3470,11 @@ public struct SFNErrorType: AWSErrorType { public static var invalidArn: Self { .init(.invalidArn) } /// The provided Amazon States Language definition is not valid. public static var invalidDefinition: Self { .init(.invalidDefinition) } + /// Received when encryptionConfiguration is specified but various conditions exist which make the configuration invalid. For example, if type is set to CUSTOMER_MANAGED_KMS_KEY, but kmsKeyId is null, or kmsDataKeyReusePeriodSeconds is not between 60 and 900, or the KMS key is not symmetric or inactive. + public static var invalidEncryptionConfiguration: Self { .init(.invalidEncryptionConfiguration) } /// The provided JSON input data is not valid. public static var invalidExecutionInput: Self { .init(.invalidExecutionInput) } + /// Configuration is not valid. public static var invalidLoggingConfiguration: Self { .init(.invalidLoggingConfiguration) } /// The provided name is not valid. 
public static var invalidName: Self { .init(.invalidName) } @@ -3391,6 +3484,12 @@ public struct SFNErrorType: AWSErrorType { public static var invalidToken: Self { .init(.invalidToken) } /// Your tracingConfiguration key does not match, or enabled has not been set to true or false. public static var invalidTracingConfiguration: Self { .init(.invalidTracingConfiguration) } + /// Either your KMS key policy or API caller does not have the required permissions. + public static var kmsAccessDeniedException: Self { .init(.kmsAccessDeniedException) } + /// The KMS key is not in valid state, for example: Disabled or Deleted. + public static var kmsInvalidStateException: Self { .init(.kmsInvalidStateException) } + /// Received when KMS returns ThrottlingException for a KMS call that Step Functions makes on behalf of the caller. + public static var kmsThrottlingException: Self { .init(.kmsThrottlingException) } /// Request is missing a required parameter. This error occurs if both definition and roleArn are not specified. public static var missingRequiredParameter: Self { .init(.missingRequiredParameter) } /// Could not find the referenced resource. @@ -3405,6 +3504,7 @@ public struct SFNErrorType: AWSErrorType { public static var stateMachineDoesNotExist: Self { .init(.stateMachineDoesNotExist) } /// The maximum number of state machines has been reached. Existing state machines must be deleted before a new state machine can be created. public static var stateMachineLimitExceeded: Self { .init(.stateMachineLimitExceeded) } + /// State machine type is not supported. public static var stateMachineTypeNotSupported: Self { .init(.stateMachineTypeNotSupported) } /// The activity does not exist. 
public static var taskDoesNotExist: Self { .init(.taskDoesNotExist) } diff --git a/Sources/Soto/Services/SSM/SSM_api.swift b/Sources/Soto/Services/SSM/SSM_api.swift index 7512fa9b47..c59a1b097e 100644 --- a/Sources/Soto/Services/SSM/SSM_api.swift +++ b/Sources/Soto/Services/SSM/SSM_api.swift @@ -622,7 +622,7 @@ public struct SSM: AWSService { ) } - /// Provides information about one or more of your managed nodes, including the operating system platform, SSM Agent version, association status, and IP address. This operation does not return information for nodes that are either Stopped or Terminated. If you specify one or more node IDs, the operation returns information for those managed nodes. If you don't specify node IDs, it returns information for all your managed nodes. If you specify a node ID that isn't valid or a node that you don't own, you receive an error. The IamRole field returned for this API operation is the Identity and Access Management (IAM) role assigned to on-premises managed nodes. This operation does not return the IAM role for EC2 instances. + /// Provides information about one or more of your managed nodes, including the operating system platform, SSM Agent version, association status, and IP address. This operation does not return information for nodes that are either Stopped or Terminated. If you specify one or more node IDs, the operation returns information for those managed nodes. If you don't specify node IDs, it returns information for all your managed nodes. If you specify a node ID that isn't valid or a node that you don't own, you receive an error. The IamRole field returned for this API operation is the role assigned to an Amazon EC2 instance configured with a Systems Manager Quick Setup host management configuration or the role assigned to an on-premises managed node. 
@Sendable public func describeInstanceInformation(_ input: DescribeInstanceInformationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeInstanceInformationResult { return try await self.client.execute( @@ -869,7 +869,7 @@ public struct SSM: AWSService { ) } - /// Lists the properties of available patches organized by product, product family, classification, severity, and other properties of available patches. You can use the reported properties in the filters you specify in requests for operations such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines. The following section lists the properties that can be used in filters for each major operating system type: AMAZON_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY AMAZON_LINUX_2 Valid properties: PRODUCT | CLASSIFICATION | SEVERITY CENTOS Valid properties: PRODUCT | CLASSIFICATION | SEVERITY DEBIAN Valid properties: PRODUCT | PRIORITY MACOS Valid properties: PRODUCT | CLASSIFICATION ORACLE_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY REDHAT_ENTERPRISE_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY SUSE Valid properties: PRODUCT | CLASSIFICATION | SEVERITY UBUNTU Valid properties: PRODUCT | PRIORITY WINDOWS Valid properties: PRODUCT | PRODUCT_FAMILY | CLASSIFICATION | MSRC_SEVERITY + /// Lists the properties of available patches organized by product, product family, classification, severity, and other properties of available patches. You can use the reported properties in the filters you specify in requests for operations such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines. 
The following section lists the properties that can be used in filters for each major operating system type: AMAZON_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY AMAZON_LINUX_2 Valid properties: PRODUCT | CLASSIFICATION | SEVERITY AMAZON_LINUX_2023 Valid properties: PRODUCT | CLASSIFICATION | SEVERITY CENTOS Valid properties: PRODUCT | CLASSIFICATION | SEVERITY DEBIAN Valid properties: PRODUCT | PRIORITY MACOS Valid properties: PRODUCT | CLASSIFICATION ORACLE_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY REDHAT_ENTERPRISE_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY SUSE Valid properties: PRODUCT | CLASSIFICATION | SEVERITY UBUNTU Valid properties: PRODUCT | PRIORITY WINDOWS Valid properties: PRODUCT | PRODUCT_FAMILY | CLASSIFICATION | MSRC_SEVERITY @Sendable public func describePatchProperties(_ input: DescribePatchPropertiesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribePatchPropertiesResult { return try await self.client.execute( @@ -934,7 +934,7 @@ public struct SSM: AWSService { ) } - /// Returns detailed information about command execution for an invocation or plugin. GetCommandInvocation only gives the execution status of a plugin in a document. To get the command execution status on a specific managed node, use ListCommandInvocations. To get the command execution status across managed nodes, use ListCommands. + /// Returns detailed information about command execution for an invocation or plugin. The Run Command API follows an eventual consistency model, due to the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your resources might not be immediately visible to all subsequent commands you run. You should keep this in mind when you carry out an API command that immediately follows a previous API command. GetCommandInvocation only gives the execution status of a plugin in a document. 
To get the command execution status on a specific managed node, use ListCommandInvocations. To get the command execution status across managed nodes, use ListCommands. @Sendable public func getCommandInvocation(_ input: GetCommandInvocationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetCommandInvocationResult { return try await self.client.execute( @@ -2094,7 +2094,7 @@ extension SSM { ) } - /// Provides information about one or more of your managed nodes, including the operating system platform, SSM Agent version, association status, and IP address. This operation does not return information for nodes that are either Stopped or Terminated. If you specify one or more node IDs, the operation returns information for those managed nodes. If you don't specify node IDs, it returns information for all your managed nodes. If you specify a node ID that isn't valid or a node that you don't own, you receive an error. The IamRole field returned for this API operation is the Identity and Access Management (IAM) role assigned to on-premises managed nodes. This operation does not return the IAM role for EC2 instances. + /// Provides information about one or more of your managed nodes, including the operating system platform, SSM Agent version, association status, and IP address. This operation does not return information for nodes that are either Stopped or Terminated. If you specify one or more node IDs, the operation returns information for those managed nodes. If you don't specify node IDs, it returns information for all your managed nodes. If you specify a node ID that isn't valid or a node that you don't own, you receive an error. The IamRole field returned for this API operation is the role assigned to an Amazon EC2 instance configured with a Systems Manager Quick Setup host management configuration or the role assigned to an on-premises managed node. /// Return PaginatorSequence for operation. 
/// /// - Parameters: @@ -2436,7 +2436,7 @@ extension SSM { ) } - /// Lists the properties of available patches organized by product, product family, classification, severity, and other properties of available patches. You can use the reported properties in the filters you specify in requests for operations such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines. The following section lists the properties that can be used in filters for each major operating system type: AMAZON_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY AMAZON_LINUX_2 Valid properties: PRODUCT | CLASSIFICATION | SEVERITY CENTOS Valid properties: PRODUCT | CLASSIFICATION | SEVERITY DEBIAN Valid properties: PRODUCT | PRIORITY MACOS Valid properties: PRODUCT | CLASSIFICATION ORACLE_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY REDHAT_ENTERPRISE_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY SUSE Valid properties: PRODUCT | CLASSIFICATION | SEVERITY UBUNTU Valid properties: PRODUCT | PRIORITY WINDOWS Valid properties: PRODUCT | PRODUCT_FAMILY | CLASSIFICATION | MSRC_SEVERITY + /// Lists the properties of available patches organized by product, product family, classification, severity, and other properties of available patches. You can use the reported properties in the filters you specify in requests for operations such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines. 
The following section lists the properties that can be used in filters for each major operating system type: AMAZON_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY AMAZON_LINUX_2 Valid properties: PRODUCT | CLASSIFICATION | SEVERITY AMAZON_LINUX_2023 Valid properties: PRODUCT | CLASSIFICATION | SEVERITY CENTOS Valid properties: PRODUCT | CLASSIFICATION | SEVERITY DEBIAN Valid properties: PRODUCT | PRIORITY MACOS Valid properties: PRODUCT | CLASSIFICATION ORACLE_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY REDHAT_ENTERPRISE_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY SUSE Valid properties: PRODUCT | CLASSIFICATION | SEVERITY UBUNTU Valid properties: PRODUCT | PRIORITY WINDOWS Valid properties: PRODUCT | PRODUCT_FAMILY | CLASSIFICATION | MSRC_SEVERITY /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/SSM/SSM_shapes.swift b/Sources/Soto/Services/SSM/SSM_shapes.swift index 1ab73cd60e..dbbdb4f097 100644 --- a/Sources/Soto/Services/SSM/SSM_shapes.swift +++ b/Sources/Soto/Services/SSM/SSM_shapes.swift @@ -3187,7 +3187,7 @@ extension SSM { public let operatingSystem: OperatingSystem? /// A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. public let rejectedPatches: [String]? - /// The action for Patch Manager to take on patches included in the RejectedPackages list. ALLOW_AS_DEPENDENCY : A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as InstalledOther. This is the default action if no option is specified. 
BLOCK: Packages in the Rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the Rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as InstalledRejected. + /// The action for Patch Manager to take on patches included in the RejectedPackages list. ALLOW_AS_DEPENDENCY Linux and macOS: A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as INSTALLED_OTHER. This is the default action if no option is specified. Windows Server: Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list is already installed on the node, its status is reported as INSTALLED_OTHER. Any package not already installed on the node is skipped. This is the default action if no option is specified. BLOCK All OSs: Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as INSTALLED_REJECTED. public let rejectedPatchesAction: PatchAction? /// Information about the patches to use to update the managed nodes, including target operating systems and source repositories. Applies to Linux managed nodes only. public let sources: [PatchSource]? @@ -6960,8 +6960,7 @@ extension SSM { public let name: String? /// The priority of the task when it runs. The lower the number, the higher the priority. Tasks that have the same priority are scheduled in parallel. public let priority: Int?
- /// The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service - /// (Amazon SNS) notifications for maintenance window Run Command tasks. + /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the Amazon Web Services Systems Manager User Guide. public let serviceRoleArn: String? /// The targets where the task should run. public let targets: [Target]? @@ -7731,7 +7730,7 @@ extension SSM { public let associationStatus: String? /// The fully qualified host name of the managed node. public let computerName: String? - /// The Identity and Access Management (IAM) role assigned to the on-premises Systems Manager managed node. This call doesn't return the IAM role for Amazon Elastic Compute Cloud (Amazon EC2) instances. To retrieve the IAM role for an EC2 instance, use the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference. + /// The role assigned to an Amazon EC2 instance configured with a Systems Manager Quick Setup host management configuration or the role assigned to an on-premises managed node.
This call doesn't return the IAM role for unmanaged Amazon EC2 instances (instances not configured for Systems Manager). To retrieve the role for an unmanaged instance, use the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference. public let iamRole: String? /// The managed node ID. public let instanceId: String? @@ -9689,8 +9688,7 @@ extension SSM { public let outputS3KeyPrefix: String? /// The parameters for the RUN_COMMAND task execution. public let parameters: [String: [String]]? - /// The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service - /// (Amazon SNS) notifications for maintenance window Run Command tasks. + /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the Amazon Web Services Systems Manager User Guide. public let serviceRoleArn: String? /// If this time is reached and the command hasn't already started running, it doesn't run. public let timeoutSeconds: Int? @@ -9813,8 +9811,7 @@ extension SSM { public let name: String? /// The priority of the task in the maintenance window. The lower the number, the higher the priority.
Tasks that have the same priority are scheduled in parallel. public let priority: Int? - /// The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service - /// (Amazon SNS) notifications for maintenance window Run Command tasks. + /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the Amazon Web Services Systems Manager User Guide. public let serviceRoleArn: String? /// The targets (either managed nodes or tags). Managed nodes are specified using Key=instanceids,Values=,. Tags are specified using Key=,Values=. public let targets: [Target]? @@ -10994,7 +10991,7 @@ extension SSM { public let baselineId: String? /// The name of the patch baseline. public let baselineName: String? - /// Whether this is the default baseline. Amazon Web Services Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system. + /// Indicates whether this is the default baseline. Amazon Web Services Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system. public let defaultBaseline: Bool? /// Defines the operating system the patch baseline applies to.
The default value is WINDOWS. public let operatingSystem: OperatingSystem? @@ -11143,9 +11140,9 @@ extension SSM { } public struct PatchRule: AWSEncodableShape & AWSDecodableShape { - /// The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released. Not supported on Debian Server or Ubuntu Server. + /// The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released. This parameter is marked as not required, but your request must include a value for either ApproveAfterDays or ApproveUntilDate. Not supported for Debian Server or Ubuntu Server. public let approveAfterDays: Int? - /// The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Not supported on Debian Server or Ubuntu Server. Enter dates in the format YYYY-MM-DD. For example, 2021-12-31. + /// The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Enter dates in the format YYYY-MM-DD. For example, 2021-12-31. This parameter is marked as not required, but your request must include a value for either ApproveUntilDate or ApproveAfterDays. Not supported for Debian Server or Ubuntu Server. public let approveUntilDate: String? /// A compliance severity level for all approved patches in a patch baseline. public let complianceLevel: PatchComplianceLevel? @@ -14012,8 +14009,7 @@ extension SSM { public let name: String? /// The updated priority value. public let priority: Int? 
- /// The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service - /// (Amazon SNS) notifications for maintenance window Run Command tasks. + /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the Amazon Web Services Systems Manager User Guide. public let serviceRoleArn: String? /// The updated target values. public let targets: [Target]? @@ -14271,7 +14267,7 @@ extension SSM { public let name: String? /// A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. public let rejectedPatches: [String]? - /// The action for Patch Manager to take on patches included in the RejectedPackages list. ALLOW_AS_DEPENDENCY : A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as InstalledOther. This is the default action if no option is specified. BLOCK: Packages in the Rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances.
If a package was installed before it was added to the Rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as InstalledRejected. + /// The action for Patch Manager to take on patches included in the RejectedPackages list. ALLOW_AS_DEPENDENCY Linux and macOS: A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as INSTALLED_OTHER. This is the default action if no option is specified. Windows Server: Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list is already installed on the node, its status is reported as INSTALLED_OTHER. Any package not already installed on the node is skipped. This is the default action if no option is specified. BLOCK All OSs: Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as INSTALLED_REJECTED. public let rejectedPatchesAction: PatchAction? /// If True, then all fields that are required by the CreatePatchBaseline operation are also required for this API request. Optional fields that aren't specified are set to null. public let replace: Bool?
diff --git a/Sources/Soto/Services/SSMQuickSetup/SSMQuickSetup_api.swift b/Sources/Soto/Services/SSMQuickSetup/SSMQuickSetup_api.swift new file mode 100644 index 0000000000..c1c5ba6f55 --- /dev/null +++ b/Sources/Soto/Services/SSMQuickSetup/SSMQuickSetup_api.swift @@ -0,0 +1,272 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +@_exported import SotoCore + +/// Service object for interacting with AWS SSMQuickSetup service. +/// +/// Quick Setup helps you quickly configure frequently used services and features with recommended best practices. Quick Setup simplifies setting up services, including Systems Manager, by automating common or recommended tasks. +public struct SSMQuickSetup: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the SSMQuickSetup client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). 
+ /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded + /// - timeout: Timeout value for HTTP requests + /// - byteBufferAllocator: Allocator for ByteBuffers + /// - options: Service options + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + middleware: AWSMiddlewareProtocol? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? partition, + serviceName: "SSMQuickSetup", + serviceIdentifier: "ssm-quicksetup", + serviceProtocol: .restjson, + apiVersion: "2018-05-10", + endpoint: endpoint, + errorType: SSMQuickSetupErrorType.self, + middleware: middleware, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + + + + + // MARK: API Calls + + /// Creates a Quick Setup configuration manager resource. This object is a collection of desired state configurations for multiple configuration definitions and summaries describing the deployments of those definitions. + @Sendable + public func createConfigurationManager(_ input: CreateConfigurationManagerInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateConfigurationManagerOutput { + return try await self.client.execute( + operation: "CreateConfigurationManager", + path: "/configurationManager", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes a configuration manager. 
+ @Sendable + public func deleteConfigurationManager(_ input: DeleteConfigurationManagerInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteConfigurationManager", + path: "/configurationManager/{ManagerArn}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a configuration manager. + @Sendable + public func getConfigurationManager(_ input: GetConfigurationManagerInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetConfigurationManagerOutput { + return try await self.client.execute( + operation: "GetConfigurationManager", + path: "/configurationManager/{ManagerArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns settings configured for Quick Setup in the requesting Amazon Web Services account and Amazon Web Services Region. + @Sendable + public func getServiceSettings(logger: Logger = AWSClient.loggingDisabled) async throws -> GetServiceSettingsOutput { + return try await self.client.execute( + operation: "GetServiceSettings", + path: "/serviceSettings", + httpMethod: .GET, + serviceConfig: self.config, + logger: logger + ) + } + + /// Returns Quick Setup configuration managers. + @Sendable + public func listConfigurationManagers(_ input: ListConfigurationManagersInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListConfigurationManagersOutput { + return try await self.client.execute( + operation: "ListConfigurationManagers", + path: "/listConfigurationManagers", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns the available Quick Setup types. 
+ @Sendable + public func listQuickSetupTypes(logger: Logger = AWSClient.loggingDisabled) async throws -> ListQuickSetupTypesOutput { + return try await self.client.execute( + operation: "ListQuickSetupTypes", + path: "/listQuickSetupTypes", + httpMethod: .GET, + serviceConfig: self.config, + logger: logger + ) + } + + /// Returns tags assigned to the resource. + @Sendable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + return try await self.client.execute( + operation: "ListTagsForResource", + path: "/tags/{ResourceArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Assigns key-value pairs of metadata to Amazon Web Services resources. + @Sendable + public func tagResource(_ input: TagResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "TagResource", + path: "/tags/{ResourceArn}", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Removes tags from the specified resource. + @Sendable + public func untagResource(_ input: UntagResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "UntagResource", + path: "/tags/{ResourceArn}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates a Quick Setup configuration definition. + @Sendable + public func updateConfigurationDefinition(_ input: UpdateConfigurationDefinitionInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "UpdateConfigurationDefinition", + path: "/configurationDefinition/{ManagerArn}/{Id}", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates a Quick Setup configuration manager. 
+ @Sendable + public func updateConfigurationManager(_ input: UpdateConfigurationManagerInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "UpdateConfigurationManager", + path: "/configurationManager/{ManagerArn}", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates settings configured for Quick Setup. + @Sendable + public func updateServiceSettings(_ input: UpdateServiceSettingsInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "UpdateServiceSettings", + path: "/serviceSettings", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } +} + +extension SSMQuickSetup { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are no public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. + public init(from: SSMQuickSetup, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension SSMQuickSetup { + /// Returns Quick Setup configuration managers. + /// Return PaginatorSequence for operation.
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listConfigurationManagersPaginator( + _ input: ListConfigurationManagersInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listConfigurationManagers, + inputKey: \ListConfigurationManagersInput.startingToken, + outputKey: \ListConfigurationManagersOutput.nextToken, + logger: logger + ) + } +} + +extension SSMQuickSetup.ListConfigurationManagersInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> SSMQuickSetup.ListConfigurationManagersInput { + return .init( + filters: self.filters, + maxItems: self.maxItems, + startingToken: token + ) + } +} diff --git a/Sources/Soto/Services/SSMQuickSetup/SSMQuickSetup_shapes.swift b/Sources/Soto/Services/SSMQuickSetup/SSMQuickSetup_shapes.swift new file mode 100644 index 0000000000..fa1f7cf8a2 --- /dev/null +++ b/Sources/Soto/Services/SSMQuickSetup/SSMQuickSetup_shapes.swift @@ -0,0 +1,650 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT.
+ +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_spi(SotoInternal) import SotoCore + +extension SSMQuickSetup { + // MARK: Enums + + public enum Status: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case deleteFailed = "DELETE_FAILED" + case deleting = "DELETING" + case deploying = "DEPLOYING" + case failed = "FAILED" + case initializing = "INITIALIZING" + case none = "NONE" + case stopFailed = "STOP_FAILED" + case stopped = "STOPPED" + case stopping = "STOPPING" + case succeeded = "SUCCEEDED" + public var description: String { return self.rawValue } + } + + public enum StatusType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case asyncExecutions = "AsyncExecutions" + case deployment = "Deployment" + public var description: String { return self.rawValue } + } + + // MARK: Shapes + + public struct ConfigurationDefinition: AWSDecodableShape { + /// The ID of the configuration definition. + public let id: String? + /// The ARN of the IAM role used to administrate local configuration deployments. + public let localDeploymentAdministrationRoleArn: String? + /// The name of the IAM role used to deploy local configurations. + public let localDeploymentExecutionRoleName: String? + /// A list of key-value pairs containing the required parameters for the configuration type. + public let parameters: [String: String] + /// The type of the Quick Setup configuration. + public let type: String + /// The version of the Quick Setup type used. + public let typeVersion: String? + + public init(id: String? = nil, localDeploymentAdministrationRoleArn: String? = nil, localDeploymentExecutionRoleName: String? = nil, parameters: [String: String], type: String, typeVersion: String? 
= nil) { + self.id = id + self.localDeploymentAdministrationRoleArn = localDeploymentAdministrationRoleArn + self.localDeploymentExecutionRoleName = localDeploymentExecutionRoleName + self.parameters = parameters + self.type = type + self.typeVersion = typeVersion + } + + private enum CodingKeys: String, CodingKey { + case id = "Id" + case localDeploymentAdministrationRoleArn = "LocalDeploymentAdministrationRoleArn" + case localDeploymentExecutionRoleName = "LocalDeploymentExecutionRoleName" + case parameters = "Parameters" + case type = "Type" + case typeVersion = "TypeVersion" + } + } + + public struct ConfigurationDefinitionInput: AWSEncodableShape { + /// The ARN of the IAM role used to administrate local configuration deployments. + public let localDeploymentAdministrationRoleArn: String? + /// The name of the IAM role used to deploy local configurations. + public let localDeploymentExecutionRoleName: String? + /// The parameters for the configuration definition type. Parameters for configuration definitions vary based the configuration type. The following tables outline the parameters for each configuration type. OpsCenter (Type: Amazon Web ServicesQuickSetupType-SSMOpsCenter) DelegatedAccountId Description: (Required) The ID of the delegated administrator account. TargetOrganizationalUnits Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to. TargetRegions Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to. Resource Scheduler (Type: Amazon Web ServicesQuickSetupType-Scheduler) TargetTagKey Description: (Required) The tag key assigned to the instances you want to target. TargetTagValue Description: (Required) The value of the tag key assigned to the instances you want to target. ICalendarString Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use. 
TargetAccounts Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits. TargetOrganizationalUnits Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to. TargetRegions Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to. Default Host Management Configuration (Type: Amazon Web ServicesQuickSetupType-DHMC) UpdateSSMAgent Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is "true". TargetOrganizationalUnits Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to. TargetRegions Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to. Resource Explorer (Type: Amazon Web ServicesQuickSetupType-ResourceExplorer) SelectedAggregatorRegion Description: (Required) The Amazon Web Services Region where you want to create the aggregator index. ReplaceExistingAggregator Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the SelectedAggregatorRegion. TargetOrganizationalUnits Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to. TargetRegions Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to. Change Manager (Type: Amazon Web ServicesQuickSetupType-SSMChangeMgr) DelegatedAccountId Description: (Required) The ID of the delegated administrator account. 
JobFunction Description: (Required) The name for the Change Manager job function. PermissionType Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are CustomPermissions and AdminPermissions. The default value for the parameter is CustomPermissions. CustomPermissions Description: (Optional) A JSON string containing the IAM policy you want your job function to use. You must provide a value for this parameter if you specify CustomPermissions for the PermissionType parameter. TargetOrganizationalUnits Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to. TargetRegions Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to. DevOps Guru (Type: AWSQuickSetupType-DevOpsGuru) AnalyseAllResources Description: (Optional) A boolean value that determines whether DevOps Guru analyzes all CloudFormation stacks in the account. The default value is "false". EnableSnsNotifications Description: (Optional) A boolean value that determines whether DevOps Guru sends notifications when an insight is created. The default value is "true". EnableSsmOpsItems Description: (Optional) A boolean value that determines whether DevOps Guru creates an OpsCenter OpsItem when an insight is created. The default value is "true". EnableDriftRemediation Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is "false". RemediationSchedule Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days), rate(14 days), rate(1 days), and none. The default value is "none". TargetAccounts Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. 
You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits. TargetOrganizationalUnits Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to. TargetRegions Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to. Conformance Packs (Type: Amazon Web ServicesQuickSetupType-CFGCPacks) DelegatedAccountId Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments. RemediationSchedule Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days), rate(14 days), rate(2 days), and none. The default value is "none". CPackNames Description: (Required) A comma separated list of Config conformance packs. TargetAccounts Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits. TargetOrganizationalUnits Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization. TargetRegions Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to. Config Recording (Type: Amazon Web ServicesQuickSetupType-CFGRecording) RecordAllResources Description: (Optional) A boolean value that determines whether all supported resources are recorded. The default value is "true". ResourceTypesToRecord Description: (Optional) A comma separated list of resource types you want to record. 
RecordGlobalResourceTypes Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is "false". GlobalResourceTypesRegion Description: (Optional) Determines the Amazon Web Services Region where global resources are recorded. UseCustomBucket Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is "false". DeliveryBucketName Description: (Optional) The name of the Amazon S3 bucket you want Config to deliver configuration snapshots and configuration history files to. DeliveryBucketPrefix Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket. NotificationOptions Description: (Optional) Determines the notification configuration for the recorder. The valid values are NoStreaming, UseExistingTopic, and CreateTopic. The default value is NoStreaming. CustomDeliveryTopicAccountId Description: (Optional) The ID of the Amazon Web Services account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the UseExistingTopic notification option. CustomDeliveryTopicName Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. You must specify a value for this parameter if you use the UseExistingTopic notification option. RemediationSchedule Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days), rate(7 days), rate(1 days), and none. The default value is "none". TargetAccounts Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits. 
TargetOrganizationalUnits Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization. TargetRegions Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to. Host Management (Type: AWSQuickSetupType-SSMHostMgmt) UpdateSSMAgent Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is "true". UpdateEc2LaunchAgent Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is "false". CollectInventory Description: (Optional) A boolean value that determines whether inventory is collected from the target instances. The default value is "true". ScanInstances Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. The default value is "true". InstallCloudWatchAgent Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is "false". UpdateCloudWatchAgent Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is "false". IsPolicyAttachAllowed Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is "false". TargetType Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *, InstanceIds, ResourceGroups, and Tags. 
Use * to target all instances in the account. TargetInstances Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds for the TargetType parameter. TargetTagKey Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter. TargetTagValue Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter. ResourceGroupName Description: (Optional) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups for the TargetType parameter. TargetAccounts Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits. TargetOrganizationalUnits Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to. TargetRegions Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to. Distributor (Type: Amazon Web ServicesQuickSetupType-Distributor) PackagesToInstall Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are AWSEFSTools, AWSCWAgent, and AWSEC2LaunchAgent. RemediationSchedule Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days), rate(14 days), rate(2 days), and none. The default value is "rate(30 days)". 
IsPolicyAttachAllowed Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is "false". TargetType Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *, InstanceIds, ResourceGroups, and Tags. Use * to target all instances in the account. TargetInstances Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds for the TargetType parameter. TargetTagKey Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter. TargetTagValue Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter. ResourceGroupName Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups for the TargetType parameter. TargetAccounts Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits. TargetOrganizationalUnits Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to. TargetRegions Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to. 
Patch Policy (Type: Amazon Web ServicesQuickSetupType-PatchPolicy) PatchPolicyName Description: (Required) A name for the patch policy. The value you provide is applied to target Amazon EC2 instances as a tag. SelectedPatchBaselines Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy. PatchBaselineUseDefault Description: (Optional) A boolean value that determines whether the selected patch baselines are all Amazon Web Services provided. ConfigurationOptionsPatchOperation Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are Scan and ScanAndInstall. The default value for the parameter is Scan. ConfigurationOptionsScanValue Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches. ConfigurationOptionsInstallValue Description: (Optional) A cron expression that is used as the schedule for when instances install available patches. ConfigurationOptionsScanNextInterval Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is "false". ConfigurationOptionsInstallNextInterval Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is "false". RebootOption Description: (Optional) A boolean value that determines whether instances are rebooted after patches are installed. The default value is "false". IsPolicyAttachAllowed Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is "false". OutputLogEnableS3 Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3. 
OutputS3Location Description: (Optional) A JSON string containing information about the Amazon S3 bucket where you want to store the output details of the request. OutputS3BucketRegion Description: (Optional) The Amazon Web Services Region where the Amazon S3 bucket you want Config to deliver command output to is located. OutputS3BucketName Description: (Optional) The name of the Amazon S3 bucket you want Config to deliver command output to. OutputS3KeyPrefix Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket. TargetType Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *, InstanceIds, ResourceGroups, and Tags. Use * to target all instances in the account. TargetInstances Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds for the TargetType parameter. TargetTagKey Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter. TargetTagValue Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter. ResourceGroupName Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups for the TargetType parameter. TargetAccounts Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits. 
TargetOrganizationalUnits Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to. TargetRegions Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to. + public let parameters: [String: String] + /// The type of the Quick Setup configuration. + public let type: String + /// The version of the Quick Setup type to use. + public let typeVersion: String? + + public init(localDeploymentAdministrationRoleArn: String? = nil, localDeploymentExecutionRoleName: String? = nil, parameters: [String: String], type: String, typeVersion: String? = nil) { + self.localDeploymentAdministrationRoleArn = localDeploymentAdministrationRoleArn + self.localDeploymentExecutionRoleName = localDeploymentExecutionRoleName + self.parameters = parameters + self.type = type + self.typeVersion = typeVersion + } + + private enum CodingKeys: String, CodingKey { + case localDeploymentAdministrationRoleArn = "LocalDeploymentAdministrationRoleArn" + case localDeploymentExecutionRoleName = "LocalDeploymentExecutionRoleName" + case parameters = "Parameters" + case type = "Type" + case typeVersion = "TypeVersion" + } + } + + public struct ConfigurationDefinitionSummary: AWSDecodableShape { + /// The common parameters and values for the configuration definition. + public let firstClassParameters: [String: String]? + /// The ID of the configuration definition. + public let id: String? + /// The type of the Quick Setup configuration used by the configuration definition. + public let type: String? + /// The version of the Quick Setup type used by the configuration definition. + public let typeVersion: String? + + public init(firstClassParameters: [String: String]? = nil, id: String? = nil, type: String? = nil, typeVersion: String? 
= nil) { + self.firstClassParameters = firstClassParameters + self.id = id + self.type = type + self.typeVersion = typeVersion + } + + private enum CodingKeys: String, CodingKey { + case firstClassParameters = "FirstClassParameters" + case id = "Id" + case type = "Type" + case typeVersion = "TypeVersion" + } + } + + public struct ConfigurationManagerSummary: AWSDecodableShape { + /// A summary of the Quick Setup configuration definition. + public let configurationDefinitionSummaries: [ConfigurationDefinitionSummary]? + /// The description of the configuration. + public let description: String? + /// The ARN of the Quick Setup configuration. + public let managerArn: String + /// The name of the configuration + public let name: String? + /// Summaries of the state of the configuration manager. These summaries include an aggregate of the statuses from the configuration definition associated with the configuration manager. This includes deployment statuses, association statuses, drift statuses, health checks, and more. + public let statusSummaries: [StatusSummary]? + + public init(configurationDefinitionSummaries: [ConfigurationDefinitionSummary]? = nil, description: String? = nil, managerArn: String, name: String? = nil, statusSummaries: [StatusSummary]? = nil) { + self.configurationDefinitionSummaries = configurationDefinitionSummaries + self.description = description + self.managerArn = managerArn + self.name = name + self.statusSummaries = statusSummaries + } + + private enum CodingKeys: String, CodingKey { + case configurationDefinitionSummaries = "ConfigurationDefinitionSummaries" + case description = "Description" + case managerArn = "ManagerArn" + case name = "Name" + case statusSummaries = "StatusSummaries" + } + } + + public struct CreateConfigurationManagerInput: AWSEncodableShape { + /// The definition of the Quick Setup configuration that the configuration manager deploys. 
+ public let configurationDefinitions: [ConfigurationDefinitionInput] + /// A description of the configuration manager. + public let description: String? + /// A name for the configuration manager. + public let name: String? + /// Key-value pairs of metadata to assign to the configuration manager. + public let tags: [String: String]? + + public init(configurationDefinitions: [ConfigurationDefinitionInput], description: String? = nil, name: String? = nil, tags: [String: String]? = nil) { + self.configurationDefinitions = configurationDefinitions + self.description = description + self.name = name + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case configurationDefinitions = "ConfigurationDefinitions" + case description = "Description" + case name = "Name" + case tags = "Tags" + } + } + + public struct CreateConfigurationManagerOutput: AWSDecodableShape { + /// The ARN for the newly created configuration manager. + public let managerArn: String + + public init(managerArn: String) { + self.managerArn = managerArn + } + + private enum CodingKeys: String, CodingKey { + case managerArn = "ManagerArn" + } + } + + public struct DeleteConfigurationManagerInput: AWSEncodableShape { + /// The ID of the configuration manager. + public let managerArn: String + + public init(managerArn: String) { + self.managerArn = managerArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.managerArn, key: "ManagerArn") + } + + private enum CodingKeys: CodingKey {} + } + + public struct Filter: AWSEncodableShape { + /// The key for the filter. + public let key: String + /// The values for the filter keys. 
+ public let values: [String] + + public init(key: String, values: [String]) { + self.key = key + self.values = values + } + + private enum CodingKeys: String, CodingKey { + case key = "Key" + case values = "Values" + } + } + + public struct GetConfigurationManagerInput: AWSEncodableShape { + /// The ARN of the configuration manager. + public let managerArn: String + + public init(managerArn: String) { + self.managerArn = managerArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.managerArn, key: "ManagerArn") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetConfigurationManagerOutput: AWSDecodableShape { + /// The configuration definitions association with the configuration manager. + public let configurationDefinitions: [ConfigurationDefinition]? + /// The datetime stamp when the configuration manager was created. + public let createdAt: Date? + /// The description of the configuration manager. + public let description: String? + /// The datetime stamp when the configuration manager was last updated. + public let lastModifiedAt: Date? + /// The ARN of the configuration manager. + public let managerArn: String + /// The name of the configuration manager. + public let name: String? + /// A summary of the state of the configuration manager. This includes deployment statuses, association statuses, drift statuses, health checks, and more. + public let statusSummaries: [StatusSummary]? + /// Key-value pairs of metadata to assign to the configuration manager. + public let tags: [String: String]? + + public init(configurationDefinitions: [ConfigurationDefinition]? = nil, createdAt: Date? = nil, description: String? = nil, lastModifiedAt: Date? = nil, managerArn: String, name: String? = nil, statusSummaries: [StatusSummary]? = nil, tags: [String: String]? 
= nil) { + self.configurationDefinitions = configurationDefinitions + self.createdAt = createdAt + self.description = description + self.lastModifiedAt = lastModifiedAt + self.managerArn = managerArn + self.name = name + self.statusSummaries = statusSummaries + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case configurationDefinitions = "ConfigurationDefinitions" + case createdAt = "CreatedAt" + case description = "Description" + case lastModifiedAt = "LastModifiedAt" + case managerArn = "ManagerArn" + case name = "Name" + case statusSummaries = "StatusSummaries" + case tags = "Tags" + } + } + + public struct GetServiceSettingsOutput: AWSDecodableShape { + /// Returns details about the settings for Quick Setup in the requesting Amazon Web Services account and Amazon Web Services Region. + public let serviceSettings: ServiceSettings? + + public init(serviceSettings: ServiceSettings? = nil) { + self.serviceSettings = serviceSettings + } + + private enum CodingKeys: String, CodingKey { + case serviceSettings = "ServiceSettings" + } + } + + public struct ListConfigurationManagersInput: AWSEncodableShape { + /// Filters the results returned by the request. + public let filters: [Filter]? + /// Specifies the maximum number of configuration managers that are returned by the request. + public let maxItems: Int? + /// The token to use when requesting a specific set of items from a list. + public let startingToken: String? + + public init(filters: [Filter]? = nil, maxItems: Int? = nil, startingToken: String? = nil) { + self.filters = filters + self.maxItems = maxItems + self.startingToken = startingToken + } + + private enum CodingKeys: String, CodingKey { + case filters = "Filters" + case maxItems = "MaxItems" + case startingToken = "StartingToken" + } + } + + public struct ListConfigurationManagersOutput: AWSDecodableShape { + /// The configuration managers returned by the request. 
+ public let configurationManagersList: [ConfigurationManagerSummary]? + /// The token to use when requesting the next set of configuration managers. If there are no additional operations to return, the string is empty. + public let nextToken: String? + + public init(configurationManagersList: [ConfigurationManagerSummary]? = nil, nextToken: String? = nil) { + self.configurationManagersList = configurationManagersList + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case configurationManagersList = "ConfigurationManagersList" + case nextToken = "NextToken" + } + } + + public struct ListQuickSetupTypesOutput: AWSDecodableShape { + /// An array of Quick Setup types. + public let quickSetupTypeList: [QuickSetupTypeOutput]? + + public init(quickSetupTypeList: [QuickSetupTypeOutput]? = nil) { + self.quickSetupTypeList = quickSetupTypeList + } + + private enum CodingKeys: String, CodingKey { + case quickSetupTypeList = "QuickSetupTypeList" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The ARN of the resource the tag is assigned to. + public let resourceArn: String + + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "ResourceArn") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// Key-value pairs of metadata assigned to the resource. + public let tags: [TagEntry]? + + public init(tags: [TagEntry]? = nil) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "Tags" + } + } + + public struct QuickSetupTypeOutput: AWSDecodableShape { + /// The latest version number of the configuration. + public let latestVersion: String? 
+ /// The type of the Quick Setup configuration. + public let type: String? + + public init(latestVersion: String? = nil, type: String? = nil) { + self.latestVersion = latestVersion + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case latestVersion = "LatestVersion" + case type = "Type" + } + } + + public struct ServiceSettings: AWSDecodableShape { + /// The IAM role used to enable Explorer. + public let explorerEnablingRoleArn: String? + + public init(explorerEnablingRoleArn: String? = nil) { + self.explorerEnablingRoleArn = explorerEnablingRoleArn + } + + private enum CodingKeys: String, CodingKey { + case explorerEnablingRoleArn = "ExplorerEnablingRoleArn" + } + } + + public struct StatusSummary: AWSDecodableShape { + /// The datetime stamp when the status was last updated. + public let lastUpdatedAt: Date + /// The current status. + public let status: Status? + /// Details about the status. + public let statusDetails: [String: String]? + /// When applicable, returns an informational message relevant to the current status and status type of the status summary object. We don't recommend implementing parsing logic around this value since the messages returned can vary in format. + public let statusMessage: String? + /// The type of a status summary. + public let statusType: StatusType + + public init(lastUpdatedAt: Date, status: Status? = nil, statusDetails: [String: String]? = nil, statusMessage: String? = nil, statusType: StatusType) { + self.lastUpdatedAt = lastUpdatedAt + self.status = status + self.statusDetails = statusDetails + self.statusMessage = statusMessage + self.statusType = statusType + } + + private enum CodingKeys: String, CodingKey { + case lastUpdatedAt = "LastUpdatedAt" + case status = "Status" + case statusDetails = "StatusDetails" + case statusMessage = "StatusMessage" + case statusType = "StatusType" + } + } + + public struct TagEntry: AWSDecodableShape { + /// The key for the tag. + public let key: String? 
+ /// The value for the tag. + public let value: String? + + public init(key: String? = nil, value: String? = nil) { + self.key = key + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case key = "Key" + case value = "Value" + } + } + + public struct TagResourceInput: AWSEncodableShape { + /// The ARN of the resource to tag. + public let resourceArn: String + /// Key-value pairs of metadata to assign to the resource. + public let tags: [String: String] + + public init(resourceArn: String, tags: [String: String]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "ResourceArn") + try container.encode(self.tags, forKey: .tags) + } + + private enum CodingKeys: String, CodingKey { + case tags = "Tags" + } + } + + public struct UntagResourceInput: AWSEncodableShape { + /// The ARN of the resource to remove tags from. + public let resourceArn: String + /// The keys of the tags to remove from the resource. + public let tagKeys: [String] + + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "ResourceArn") + request.encodeQuery(self.tagKeys, key: "tagKeys") + } + + private enum CodingKeys: CodingKey {} + } + + public struct UpdateConfigurationDefinitionInput: AWSEncodableShape { + /// The ID of the configuration definition you want to update. + public let id: String + /// The ARN of the IAM role used to administrate local configuration deployments. + public let localDeploymentAdministrationRoleArn: String? 
+ /// The name of the IAM role used to deploy local configurations. + public let localDeploymentExecutionRoleName: String? + /// The ARN of the configuration manager associated with the definition to update. + public let managerArn: String + /// The parameters for the configuration definition type. + public let parameters: [String: String]? + /// The version of the Quick Setup type to use. + public let typeVersion: String? + + public init(id: String, localDeploymentAdministrationRoleArn: String? = nil, localDeploymentExecutionRoleName: String? = nil, managerArn: String, parameters: [String: String]? = nil, typeVersion: String? = nil) { + self.id = id + self.localDeploymentAdministrationRoleArn = localDeploymentAdministrationRoleArn + self.localDeploymentExecutionRoleName = localDeploymentExecutionRoleName + self.managerArn = managerArn + self.parameters = parameters + self.typeVersion = typeVersion + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.id, key: "Id") + try container.encodeIfPresent(self.localDeploymentAdministrationRoleArn, forKey: .localDeploymentAdministrationRoleArn) + try container.encodeIfPresent(self.localDeploymentExecutionRoleName, forKey: .localDeploymentExecutionRoleName) + request.encodePath(self.managerArn, key: "ManagerArn") + try container.encodeIfPresent(self.parameters, forKey: .parameters) + try container.encodeIfPresent(self.typeVersion, forKey: .typeVersion) + } + + private enum CodingKeys: String, CodingKey { + case localDeploymentAdministrationRoleArn = "LocalDeploymentAdministrationRoleArn" + case localDeploymentExecutionRoleName = "LocalDeploymentExecutionRoleName" + case parameters = "Parameters" + case typeVersion = "TypeVersion" + } + } + + public struct UpdateConfigurationManagerInput: AWSEncodableShape { + /// A description of the configuration manager. 
+ public let description: String? + /// The ARN of the configuration manager. + public let managerArn: String + /// A name for the configuration manager. + public let name: String? + + public init(description: String? = nil, managerArn: String, name: String? = nil) { + self.description = description + self.managerArn = managerArn + self.name = name + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.managerArn, key: "ManagerArn") + try container.encodeIfPresent(self.name, forKey: .name) + } + + private enum CodingKeys: String, CodingKey { + case description = "Description" + case name = "Name" + } + } + + public struct UpdateServiceSettingsInput: AWSEncodableShape { + /// The IAM role used to enable Explorer. + public let explorerEnablingRoleArn: String? + + public init(explorerEnablingRoleArn: String? = nil) { + self.explorerEnablingRoleArn = explorerEnablingRoleArn + } + + private enum CodingKeys: String, CodingKey { + case explorerEnablingRoleArn = "ExplorerEnablingRoleArn" + } + } +} + +// MARK: - Errors + +/// Error enum for SSMQuickSetup +public struct SSMQuickSetupErrorType: AWSErrorType { + enum Code: String { + case accessDeniedException = "AccessDeniedException" + case conflictException = "ConflictException" + case internalServerException = "InternalServerException" + case resourceNotFoundException = "ResourceNotFoundException" + case throttlingException = "ThrottlingException" + case validationException = "ValidationException" + } + + private let error: Code + public let context: AWSErrorContext? 
+ + /// initialize SSMQuickSetup + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// The requester has insufficient permissions to perform the operation. + public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// Another request is being processed. Wait a few minutes and try again. + public static var conflictException: Self { .init(.conflictException) } + /// An error occurred on the server side. + public static var internalServerException: Self { .init(.internalServerException) } + /// The resource couldn't be found. Check the ID or name and try again. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// The request or operation exceeds the maximum allowed request rate per Amazon Web Services account and Amazon Web Services Region. + public static var throttlingException: Self { .init(.throttlingException) } + /// The request is invalid. Verify the values provided for the request parameters are accurate. + public static var validationException: Self { .init(.validationException) } +} + +extension SSMQuickSetupErrorType: Equatable { + public static func == (lhs: SSMQuickSetupErrorType, rhs: SSMQuickSetupErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension SSMQuickSetupErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? 
"")" + } +} diff --git a/Sources/Soto/Services/SageMaker/SageMaker_api.swift b/Sources/Soto/Services/SageMaker/SageMaker_api.swift index dbd1ec69f1..fe68a0f0f2 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_api.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_api.swift @@ -230,7 +230,7 @@ public struct SageMaker: AWSService { ) } - /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. + /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. 
Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. @Sendable public func createAutoMLJob(_ input: CreateAutoMLJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAutoMLJobResponse { return try await self.client.execute( @@ -243,7 +243,7 @@ public struct SageMaker: AWSService { ) } - /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. 
+ /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide. AutoML jobs V2 support various problem types such as regression, binary, and multiclass classification with tabular data, text and image classification, time-series forecasting, and fine-tuning of large language models (LLMs) for text generation. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. 
For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. @Sendable public func createAutoMLJobV2(_ input: CreateAutoMLJobV2Request, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAutoMLJobV2Response { return try await self.client.execute( diff --git a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift index d7a27d3d82..417ddc787f 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift @@ -1482,6 +1482,7 @@ extension SageMaker { case endpoints = "Endpoints" case experiments = "Experiments" case featureStore = "FeatureStore" + case inferenceOptimization = "InferenceOptimization" case inferenceRecommender = "InferenceRecommender" case jumpStart = "JumpStart" case modelEvaluation = "ModelEvaluation" @@ -4374,6 +4375,23 @@ extension SageMaker { } } + public struct AutoMLComputeConfig: AWSEncodableShape & AWSDecodableShape { + /// The configuration for using EMR Serverless to run the AutoML job V2. To allow your AutoML job V2 to automatically initiate a remote job on EMR Serverless when additional compute resources are needed to process large datasets, you need to provide an EmrServerlessComputeConfig object, which includes an ExecutionRoleARN attribute, to the AutoMLComputeConfig of the AutoML job V2 input request. By seamlessly transitioning to EMR Serverless when required, the AutoML job can handle datasets that would otherwise exceed the initially provisioned resources, without any manual intervention from you. EMR Serverless is available for the tabular and time series problem types. We recommend setting up this option for tabular datasets larger than 5 GB and time series datasets larger than 30 GB. + public let emrServerlessComputeConfig: EmrServerlessComputeConfig? 
+ + public init(emrServerlessComputeConfig: EmrServerlessComputeConfig? = nil) { + self.emrServerlessComputeConfig = emrServerlessComputeConfig + } + + public func validate(name: String) throws { + try self.emrServerlessComputeConfig?.validate(name: "\(name).emrServerlessComputeConfig") + } + + private enum CodingKeys: String, CodingKey { + case emrServerlessComputeConfig = "EmrServerlessComputeConfig" + } + } + public struct AutoMLContainerDefinition: AWSDecodableShape { /// The environment variables to set in the container. For more information, see ContainerDefinition. public let environment: [String: String]? @@ -4616,7 +4634,7 @@ extension SageMaker { public struct AutoMLOutputDataConfig: AWSEncodableShape & AWSDecodableShape { /// The Key Management Service encryption key ID. public let kmsKeyId: String? - /// The Amazon S3 output path. Must be 128 characters or less. + /// The Amazon S3 output path. Must be 512 characters or less. public let s3OutputPath: String? public init(kmsKeyId: String? = nil, s3OutputPath: String? = nil) { @@ -5138,6 +5156,8 @@ extension SageMaker { public struct CanvasAppSettings: AWSEncodableShape & AWSDecodableShape { /// The model deployment settings for the SageMaker Canvas application. public let directDeploySettings: DirectDeploySettings? + /// The settings for running Amazon EMR Serverless data processing jobs in SageMaker Canvas. + public let emrServerlessSettings: EmrServerlessSettings? /// The generative AI settings for the SageMaker Canvas application. public let generativeAiSettings: GenerativeAiSettings? /// The settings for connecting to an external data source with OAuth. @@ -5151,8 +5171,9 @@ extension SageMaker { /// The workspace settings for the SageMaker Canvas application. public let workspaceSettings: WorkspaceSettings? - public init(directDeploySettings: DirectDeploySettings? = nil, generativeAiSettings: GenerativeAiSettings? = nil, identityProviderOAuthSettings: [IdentityProviderOAuthSetting]? 
= nil, kendraSettings: KendraSettings? = nil, modelRegisterSettings: ModelRegisterSettings? = nil, timeSeriesForecastingSettings: TimeSeriesForecastingSettings? = nil, workspaceSettings: WorkspaceSettings? = nil) { + public init(directDeploySettings: DirectDeploySettings? = nil, emrServerlessSettings: EmrServerlessSettings? = nil, generativeAiSettings: GenerativeAiSettings? = nil, identityProviderOAuthSettings: [IdentityProviderOAuthSetting]? = nil, kendraSettings: KendraSettings? = nil, modelRegisterSettings: ModelRegisterSettings? = nil, timeSeriesForecastingSettings: TimeSeriesForecastingSettings? = nil, workspaceSettings: WorkspaceSettings? = nil) { self.directDeploySettings = directDeploySettings + self.emrServerlessSettings = emrServerlessSettings self.generativeAiSettings = generativeAiSettings self.identityProviderOAuthSettings = identityProviderOAuthSettings self.kendraSettings = kendraSettings @@ -5162,6 +5183,7 @@ extension SageMaker { } public func validate(name: String) throws { + try self.emrServerlessSettings?.validate(name: "\(name).emrServerlessSettings") try self.generativeAiSettings?.validate(name: "\(name).generativeAiSettings") try self.identityProviderOAuthSettings?.forEach { try $0.validate(name: "\(name).identityProviderOAuthSettings[]") @@ -5174,6 +5196,7 @@ extension SageMaker { private enum CodingKeys: String, CodingKey { case directDeploySettings = "DirectDeploySettings" + case emrServerlessSettings = "EmrServerlessSettings" case generativeAiSettings = "GenerativeAiSettings" case identityProviderOAuthSettings = "IdentityProviderOAuthSettings" case kendraSettings = "KendraSettings" @@ -6881,6 +6904,8 @@ extension SageMaker { } public struct CreateAutoMLJobV2Request: AWSEncodableShape { + /// Specifies the compute configuration for the AutoML job V2. + public let autoMLComputeConfig: AutoMLComputeConfig? /// An array of channel objects describing the input data and their location. Each channel is a named input source. 
Similar to the InputDataConfig attribute in the CreateAutoMLJob input parameters. The supported formats depend on the problem type: For tabular problem types: S3Prefix, ManifestFile. For image classification: S3Prefix, ManifestFile, AugmentedManifestFile. For text classification: S3Prefix. For time-series forecasting: S3Prefix. For text generation (LLMs fine-tuning): S3Prefix. public let autoMLJobInputDataConfig: [AutoMLJobChannel]? /// Identifies an Autopilot job. The name must be unique to your account and is case insensitive. @@ -6902,7 +6927,8 @@ extension SageMaker { /// An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, such as by purpose, owner, or environment. For more information, see Tagging Amazon Web ServicesResources. Tag keys must be unique per resource. public let tags: [Tag]? - public init(autoMLJobInputDataConfig: [AutoMLJobChannel]? = nil, autoMLJobName: String? = nil, autoMLJobObjective: AutoMLJobObjective? = nil, autoMLProblemTypeConfig: AutoMLProblemTypeConfig? = nil, dataSplitConfig: AutoMLDataSplitConfig? = nil, modelDeployConfig: ModelDeployConfig? = nil, outputDataConfig: AutoMLOutputDataConfig? = nil, roleArn: String? = nil, securityConfig: AutoMLSecurityConfig? = nil, tags: [Tag]? = nil) { + public init(autoMLComputeConfig: AutoMLComputeConfig? = nil, autoMLJobInputDataConfig: [AutoMLJobChannel]? = nil, autoMLJobName: String? = nil, autoMLJobObjective: AutoMLJobObjective? = nil, autoMLProblemTypeConfig: AutoMLProblemTypeConfig? = nil, dataSplitConfig: AutoMLDataSplitConfig? = nil, modelDeployConfig: ModelDeployConfig? = nil, outputDataConfig: AutoMLOutputDataConfig? = nil, roleArn: String? = nil, securityConfig: AutoMLSecurityConfig? = nil, tags: [Tag]? 
= nil) { + self.autoMLComputeConfig = autoMLComputeConfig self.autoMLJobInputDataConfig = autoMLJobInputDataConfig self.autoMLJobName = autoMLJobName self.autoMLJobObjective = autoMLJobObjective @@ -6916,6 +6942,7 @@ extension SageMaker { } public func validate(name: String) throws { + try self.autoMLComputeConfig?.validate(name: "\(name).autoMLComputeConfig") try self.autoMLJobInputDataConfig?.forEach { try $0.validate(name: "\(name).autoMLJobInputDataConfig[]") } @@ -6939,6 +6966,7 @@ extension SageMaker { } private enum CodingKeys: String, CodingKey { + case autoMLComputeConfig = "AutoMLComputeConfig" case autoMLJobInputDataConfig = "AutoMLJobInputDataConfig" case autoMLJobName = "AutoMLJobName" case autoMLJobObjective = "AutoMLJobObjective" @@ -13268,6 +13296,8 @@ extension SageMaker { } public struct DescribeAutoMLJobV2Response: AWSDecodableShape { + /// The compute configuration used for the AutoML job V2. + public let autoMLComputeConfig: AutoMLComputeConfig? /// Returns the Amazon Resource Name (ARN) of the AutoML job V2. public let autoMLJobArn: String? public let autoMLJobArtifacts: AutoMLJobArtifacts? @@ -13312,7 +13342,8 @@ extension SageMaker { /// Returns the security configuration for traffic encryption or Amazon VPC settings. public let securityConfig: AutoMLSecurityConfig? - public init(autoMLJobArn: String? = nil, autoMLJobArtifacts: AutoMLJobArtifacts? = nil, autoMLJobInputDataConfig: [AutoMLJobChannel]? = nil, autoMLJobName: String? = nil, autoMLJobObjective: AutoMLJobObjective? = nil, autoMLJobSecondaryStatus: AutoMLJobSecondaryStatus? = nil, autoMLJobStatus: AutoMLJobStatus? = nil, autoMLProblemTypeConfig: AutoMLProblemTypeConfig? = nil, autoMLProblemTypeConfigName: AutoMLProblemTypeConfigName? = nil, bestCandidate: AutoMLCandidate? = nil, creationTime: Date? = nil, dataSplitConfig: AutoMLDataSplitConfig? = nil, endTime: Date? = nil, failureReason: String? = nil, lastModifiedTime: Date? = nil, modelDeployConfig: ModelDeployConfig? 
= nil, modelDeployResult: ModelDeployResult? = nil, outputDataConfig: AutoMLOutputDataConfig? = nil, partialFailureReasons: [AutoMLPartialFailureReason]? = nil, resolvedAttributes: AutoMLResolvedAttributes? = nil, roleArn: String? = nil, securityConfig: AutoMLSecurityConfig? = nil) { + public init(autoMLComputeConfig: AutoMLComputeConfig? = nil, autoMLJobArn: String? = nil, autoMLJobArtifacts: AutoMLJobArtifacts? = nil, autoMLJobInputDataConfig: [AutoMLJobChannel]? = nil, autoMLJobName: String? = nil, autoMLJobObjective: AutoMLJobObjective? = nil, autoMLJobSecondaryStatus: AutoMLJobSecondaryStatus? = nil, autoMLJobStatus: AutoMLJobStatus? = nil, autoMLProblemTypeConfig: AutoMLProblemTypeConfig? = nil, autoMLProblemTypeConfigName: AutoMLProblemTypeConfigName? = nil, bestCandidate: AutoMLCandidate? = nil, creationTime: Date? = nil, dataSplitConfig: AutoMLDataSplitConfig? = nil, endTime: Date? = nil, failureReason: String? = nil, lastModifiedTime: Date? = nil, modelDeployConfig: ModelDeployConfig? = nil, modelDeployResult: ModelDeployResult? = nil, outputDataConfig: AutoMLOutputDataConfig? = nil, partialFailureReasons: [AutoMLPartialFailureReason]? = nil, resolvedAttributes: AutoMLResolvedAttributes? = nil, roleArn: String? = nil, securityConfig: AutoMLSecurityConfig? 
= nil) { + self.autoMLComputeConfig = autoMLComputeConfig self.autoMLJobArn = autoMLJobArn self.autoMLJobArtifacts = autoMLJobArtifacts self.autoMLJobInputDataConfig = autoMLJobInputDataConfig @@ -13338,6 +13369,7 @@ extension SageMaker { } private enum CodingKeys: String, CodingKey { + case autoMLComputeConfig = "AutoMLComputeConfig" case autoMLJobArn = "AutoMLJobArn" case autoMLJobArtifacts = "AutoMLJobArtifacts" case autoMLJobInputDataConfig = "AutoMLJobInputDataConfig" @@ -18732,6 +18764,80 @@ extension SageMaker { } } + public struct EmrServerlessComputeConfig: AWSEncodableShape & AWSDecodableShape { + /// The ARN of the IAM role granting the AutoML job V2 the necessary permissions access policies to list, connect to, or manage EMR Serverless jobs. For detailed information about the required permissions of this role, see "How to configure AutoML to initiate a remote job on EMR Serverless for large datasets" in Create a regression or classification job for tabular data using the AutoML API or Create an AutoML job for time-series forecasting using the API. + public let executionRoleARN: String? + + public init(executionRoleARN: String? = nil) { + self.executionRoleARN = executionRoleARN + } + + public func validate(name: String) throws { + try self.validate(self.executionRoleARN, name: "executionRoleARN", parent: name, max: 2048) + try self.validate(self.executionRoleARN, name: "executionRoleARN", parent: name, min: 20) + try self.validate(self.executionRoleARN, name: "executionRoleARN", parent: name, pattern: "^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + } + + private enum CodingKeys: String, CodingKey { + case executionRoleARN = "ExecutionRoleARN" + } + } + + public struct EmrServerlessSettings: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Amazon Web Services IAM role that is assumed for running Amazon EMR Serverless jobs in SageMaker Canvas. 
This role should have the necessary permissions to read and write data attached and a trust relationship with EMR Serverless. + public let executionRoleArn: String? + /// Describes whether Amazon EMR Serverless job capabilities are enabled or disabled in the SageMaker Canvas application. + public let status: FeatureStatus? + + public init(executionRoleArn: String? = nil, status: FeatureStatus? = nil) { + self.executionRoleArn = executionRoleArn + self.status = status + } + + public func validate(name: String) throws { + try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, max: 2048) + try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, min: 20) + try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, pattern: "^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + } + + private enum CodingKeys: String, CodingKey { + case executionRoleArn = "ExecutionRoleArn" + case status = "Status" + } + } + + public struct EmrSettings: AWSEncodableShape & AWSDecodableShape { + /// An array of Amazon Resource Names (ARNs) of the IAM roles that the execution role of SageMaker can assume for performing operations or tasks related to Amazon EMR clusters or Amazon EMR Serverless applications. These roles define the permissions and access policies required when performing Amazon EMR-related operations, such as listing, connecting to, or terminating Amazon EMR clusters or Amazon EMR Serverless applications. They are typically used in cross-account access scenarios, where the Amazon EMR resources (clusters or serverless applications) are located in a different Amazon Web Services account than the SageMaker domain. + public let assumableRoleArns: [String]? 
+ /// An array of Amazon Resource Names (ARNs) of the IAM roles used by the Amazon EMR cluster instances or job execution environments to access other Amazon Web Services services and resources needed during the runtime of your Amazon EMR or Amazon EMR Serverless workloads, such as Amazon S3 for data access, Amazon CloudWatch for logging, or other Amazon Web Services services based on the particular workload requirements. + public let executionRoleArns: [String]? + + public init(assumableRoleArns: [String]? = nil, executionRoleArns: [String]? = nil) { + self.assumableRoleArns = assumableRoleArns + self.executionRoleArns = executionRoleArns + } + + public func validate(name: String) throws { + try self.assumableRoleArns?.forEach { + try validate($0, name: "assumableRoleArns[]", parent: name, max: 2048) + try validate($0, name: "assumableRoleArns[]", parent: name, min: 20) + try validate($0, name: "assumableRoleArns[]", parent: name, pattern: "^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + } + try self.validate(self.assumableRoleArns, name: "assumableRoleArns", parent: name, max: 5) + try self.executionRoleArns?.forEach { + try validate($0, name: "executionRoleArns[]", parent: name, max: 2048) + try validate($0, name: "executionRoleArns[]", parent: name, min: 20) + try validate($0, name: "executionRoleArns[]", parent: name, pattern: "^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + } + try self.validate(self.executionRoleArns, name: "executionRoleArns", parent: name, max: 5) + } + + private enum CodingKeys: String, CodingKey { + case assumableRoleArns = "AssumableRoleArns" + case executionRoleArns = "ExecutionRoleArns" + } + } + public struct EnableSagemakerServicecatalogPortfolioInput: AWSEncodableShape { public init() {} } @@ -18796,6 +18902,19 @@ extension SageMaker { } } + public struct EndpointConfigStepMetadata: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the endpoint configuration used in the step. 
+ public let arn: String? + + public init(arn: String? = nil) { + self.arn = arn + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + } + } + public struct EndpointConfigSummary: AWSDecodableShape { /// A timestamp that shows when the endpoint configuration was created. public let creationTime: Date? @@ -19003,6 +19122,19 @@ extension SageMaker { } } + public struct EndpointStepMetadata: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the endpoint in the step. + public let arn: String? + + public init(arn: String? = nil) { + self.arn = arn + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + } + } + public struct EndpointSummary: AWSDecodableShape { /// A timestamp that shows when the endpoint was created. public let creationTime: Date? @@ -22008,13 +22140,16 @@ extension SageMaker { /// A list of custom SageMaker images that are configured to run as a JupyterLab app. public let customImages: [CustomImage]? public let defaultResourceSpec: ResourceSpec? + /// The configuration parameters that specify the IAM roles assumed by the execution role of SageMaker (assumable roles) and the cluster instances or job execution environments (execution roles or runtime roles) to manage and access resources required for running Amazon EMR clusters or Amazon EMR Serverless applications. + public let emrSettings: EmrSettings? /// The Amazon Resource Name (ARN) of the lifecycle configurations attached to the user profile or domain. To remove a lifecycle config, you must set LifecycleConfigArns to an empty list. public let lifecycleConfigArns: [String]? - public init(codeRepositories: [CodeRepository]? = nil, customImages: [CustomImage]? = nil, defaultResourceSpec: ResourceSpec? = nil, lifecycleConfigArns: [String]? = nil) { + public init(codeRepositories: [CodeRepository]? = nil, customImages: [CustomImage]? = nil, defaultResourceSpec: ResourceSpec? = nil, emrSettings: EmrSettings? = nil, lifecycleConfigArns: [String]? 
= nil) { self.codeRepositories = codeRepositories self.customImages = customImages self.defaultResourceSpec = defaultResourceSpec + self.emrSettings = emrSettings self.lifecycleConfigArns = lifecycleConfigArns } @@ -22028,6 +22163,7 @@ extension SageMaker { } try self.validate(self.customImages, name: "customImages", parent: name, max: 200) try self.defaultResourceSpec?.validate(name: "\(name).defaultResourceSpec") + try self.emrSettings?.validate(name: "\(name).emrSettings") try self.lifecycleConfigArns?.forEach { try validate($0, name: "lifecycleConfigArns[]", parent: name, max: 256) try validate($0, name: "lifecycleConfigArns[]", parent: name, pattern: "^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:studio-lifecycle-config/") @@ -22038,6 +22174,7 @@ extension SageMaker { case codeRepositories = "CodeRepositories" case customImages = "CustomImages" case defaultResourceSpec = "DefaultResourceSpec" + case emrSettings = "EmrSettings" case lifecycleConfigArns = "LifecycleConfigArns" } } @@ -31648,6 +31785,10 @@ extension SageMaker { public let condition: ConditionStepMetadata? /// The configurations and outcomes of an Amazon EMR step execution. public let emr: EMRStepMetadata? + /// The endpoint that was invoked during this step execution. + public let endpoint: EndpointStepMetadata? + /// The endpoint configuration used to create an endpoint during this step execution. + public let endpointConfig: EndpointConfigStepMetadata? /// The configurations and outcomes of a Fail step execution. public let fail: FailStepMetadata? /// The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution and a list of output parameters. @@ -31667,12 +31808,14 @@ extension SageMaker { /// The Amazon Resource Name (ARN) of the tuning job that was run by this step execution. public let tuningJob: TuningJobStepMetaData? - public init(autoMLJob: AutoMLJobStepMetadata? = nil, callback: CallbackStepMetadata? = nil, clarifyCheck: ClarifyCheckStepMetadata? 
= nil, condition: ConditionStepMetadata? = nil, emr: EMRStepMetadata? = nil, fail: FailStepMetadata? = nil, lambda: LambdaStepMetadata? = nil, model: ModelStepMetadata? = nil, processingJob: ProcessingJobStepMetadata? = nil, qualityCheck: QualityCheckStepMetadata? = nil, registerModel: RegisterModelStepMetadata? = nil, trainingJob: TrainingJobStepMetadata? = nil, transformJob: TransformJobStepMetadata? = nil, tuningJob: TuningJobStepMetaData? = nil) { + public init(autoMLJob: AutoMLJobStepMetadata? = nil, callback: CallbackStepMetadata? = nil, clarifyCheck: ClarifyCheckStepMetadata? = nil, condition: ConditionStepMetadata? = nil, emr: EMRStepMetadata? = nil, endpoint: EndpointStepMetadata? = nil, endpointConfig: EndpointConfigStepMetadata? = nil, fail: FailStepMetadata? = nil, lambda: LambdaStepMetadata? = nil, model: ModelStepMetadata? = nil, processingJob: ProcessingJobStepMetadata? = nil, qualityCheck: QualityCheckStepMetadata? = nil, registerModel: RegisterModelStepMetadata? = nil, trainingJob: TrainingJobStepMetadata? = nil, transformJob: TransformJobStepMetadata? = nil, tuningJob: TuningJobStepMetaData? = nil) { self.autoMLJob = autoMLJob self.callback = callback self.clarifyCheck = clarifyCheck self.condition = condition self.emr = emr + self.endpoint = endpoint + self.endpointConfig = endpointConfig self.fail = fail self.lambda = lambda self.model = model @@ -31690,6 +31833,8 @@ extension SageMaker { case clarifyCheck = "ClarifyCheck" case condition = "Condition" case emr = "EMR" + case endpoint = "Endpoint" + case endpointConfig = "EndpointConfig" case fail = "Fail" case lambda = "Lambda" case model = "Model" @@ -32202,7 +32347,7 @@ extension SageMaker { public let coreDumpConfig: ProductionVariantCoreDumpConfig? /// You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoint. 
You can turn on or turn off SSM access for a production variant behind an existing endpoint by creating a new endpoint configuration and calling UpdateEndpoint. public let enableSSMAccess: Bool? - /// Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads. By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions. + /// Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads. By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions. The AMI version names, and their configurations, are the following: al2-ami-sagemaker-inference-gpu-2 Accelerator: GPU NVIDIA driver version: 535.54.03 CUDA driver version: 12.2 Supported instance types: ml.g4dn.*, ml.g5.*, ml.g6.*, ml.p3.*, ml.p4d.*, ml.p4de.*, ml.p5.* public let inferenceAmiVersion: ProductionVariantInferenceAmiVersion? /// Number of instances to launch initially. public let initialInstanceCount: Int? diff --git a/Sources/Soto/Services/Support/Support_shapes.swift b/Sources/Soto/Services/Support/Support_shapes.swift index efff9b762d..ad9dd75a12 100644 --- a/Sources/Soto/Services/Support/Support_shapes.swift +++ b/Sources/Soto/Services/Support/Support_shapes.swift @@ -159,7 +159,7 @@ extension Support { public let serviceCode: String? 
/// The code for the severity level returned by the call to DescribeSeverityLevels. public let severityCode: String? - /// The status of the case. Valid values: opened pending-customer-action reopened resolved unassigned work-in-progress + /// The status of the case. Valid values: all-open customer-action-completed opened pending-customer-action reopened resolved unassigned work-in-progress public let status: String? /// The subject line for the case in the Amazon Web Services Support Center. public let subject: String? diff --git a/Sources/Soto/Services/Tnb/Tnb_api.swift b/Sources/Soto/Services/Tnb/Tnb_api.swift index 3f481e52bb..095858e666 100644 --- a/Sources/Soto/Services/Tnb/Tnb_api.swift +++ b/Sources/Soto/Services/Tnb/Tnb_api.swift @@ -86,7 +86,7 @@ public struct Tnb: AWSService { ) } - /// Creates a function package. A function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network. For more information, see Function packages in the Amazon Web Services Telco Network Builder User Guide. Creating a function package is the first step for creating a network in AWS TNB. This request creates an empty container with an ID. The next step is to upload the actual CSAR zip file into that empty container. To upload function package content, see PutSolFunctionPackageContent. + /// Creates a function package. A function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network. For more information, see Function packages in the Amazon Web Services Telco Network Builder User Guide. 
Creating a function package is the first step for creating a network in AWS TNB. This request creates an empty container with an ID. The next step is to upload the actual CSAR zip file into that empty container. To upload function package content, see PutSolFunctionPackageContent. @Sendable public func createSolFunctionPackage(_ input: CreateSolFunctionPackageInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSolFunctionPackageOutput { return try await self.client.execute( @@ -112,7 +112,7 @@ public struct Tnb: AWSService { ) } - /// Creates a network package. A network package is a .zip file in CSAR (Cloud Service Archive) format defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on. For more information, see Network instances in the Amazon Web Services Telco Network Builder User Guide. A network package consists of a network service descriptor (NSD) file (required) and any additional files (optional), such as scripts specific to your needs. For example, if you have multiple function packages in your network package, you can use the NSD to define which network functions should run in certain VPCs, subnets, or EKS clusters. This request creates an empty network package container with an ID. Once you create a network package, you can upload the network package content using PutSolNetworkPackageContent. + /// Creates a network package. A network package is a .zip file in CSAR (Cloud Service Archive) format defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on. For more information, see Network instances in the Amazon Web Services Telco Network Builder User Guide. A network package consists of a network service descriptor (NSD) file (required) and any additional files (optional), such as scripts specific to your needs. 
For example, if you have multiple function packages in your network package, you can use the NSD to define which network functions should run in certain VPCs, subnets, or EKS clusters. This request creates an empty network package container with an ID. Once you create a network package, you can upload the network package content using PutSolNetworkPackageContent. @Sendable public func createSolNetworkPackage(_ input: CreateSolNetworkPackageInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSolNetworkPackageOutput { return try await self.client.execute( @@ -164,7 +164,7 @@ public struct Tnb: AWSService { ) } - /// Gets the details of a network function instance, including the instantation state and metadata from the function package descriptor in the network function package. A network function instance is a function in a function package . + /// Gets the details of a network function instance, including the instantiation state and metadata from the function package descriptor in the network function package. A network function instance is a function in a function package . @Sendable public func getSolFunctionInstance(_ input: GetSolFunctionInstanceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetSolFunctionInstanceOutput { return try await self.client.execute( @@ -450,7 +450,7 @@ public struct Tnb: AWSService { ) } - /// Update a network instance. A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed. + /// Update a network instance. A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed. Choose the updateType parameter to target the necessary update of the network instance. 
@Sendable public func updateSolNetworkInstance(_ input: UpdateSolNetworkInstanceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateSolNetworkInstanceOutput { return try await self.client.execute( @@ -643,7 +643,8 @@ extension Tnb.ListSolNetworkOperationsInput: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Tnb.ListSolNetworkOperationsInput { return .init( maxResults: self.maxResults, - nextToken: token + nextToken: token, + nsInstanceId: self.nsInstanceId ) } } diff --git a/Sources/Soto/Services/Tnb/Tnb_shapes.swift b/Sources/Soto/Services/Tnb/Tnb_shapes.swift index 5bf27e8f40..97446f6e87 100644 --- a/Sources/Soto/Services/Tnb/Tnb_shapes.swift +++ b/Sources/Soto/Services/Tnb/Tnb_shapes.swift @@ -52,10 +52,13 @@ extension Tnb { case impaired = "IMPAIRED" case instantiateInProgress = "INSTANTIATE_IN_PROGRESS" case instantiated = "INSTANTIATED" + case intentToUpdateInProgress = "INTENT_TO_UPDATE_IN_PROGRESS" case notInstantiated = "NOT_INSTANTIATED" case stopped = "STOPPED" case terminateInProgress = "TERMINATE_IN_PROGRESS" + case updateFailed = "UPDATE_FAILED" case updateInProgress = "UPDATE_IN_PROGRESS" + case updated = "UPDATED" public var description: String { return self.rawValue } } @@ -109,6 +112,7 @@ extension Tnb { public enum UpdateSolNetworkType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case modifyVnfInformation = "MODIFY_VNF_INFORMATION" + case updateNs = "UPDATE_NS" public var description: String { return self.rawValue } } @@ -660,7 +664,7 @@ extension Tnb { public let tags: [String: String]? /// Function package usage state. public let usageState: UsageState - /// Function package descriptor ID. + /// Function package descriptor ID. public let vnfdId: String? /// Function package descriptor version. public let vnfdVersion: String? 
@@ -820,17 +824,29 @@ extension Tnb { public struct GetSolNetworkOperationMetadata: AWSDecodableShape { /// The date that the resource was created. public let createdAt: Date + /// Metadata related to the network operation occurrence for network instantiation. This is populated only if the lcmOperationType is INSTANTIATE. + public let instantiateMetadata: InstantiateMetadata? /// The date that the resource was last modified. public let lastModified: Date + /// Metadata related to the network operation occurrence for network function updates in a network instance. This is populated only if the lcmOperationType is UPDATE and the updateType is MODIFY_VNF_INFORMATION. + public let modifyVnfInfoMetadata: ModifyVnfInfoMetadata? + /// Metadata related to the network operation occurrence for network instance updates. This is populated only if the lcmOperationType is UPDATE and the updateType is UPDATE_NS. + public let updateNsMetadata: UpdateNsMetadata? - public init(createdAt: Date, lastModified: Date) { + public init(createdAt: Date, instantiateMetadata: InstantiateMetadata? = nil, lastModified: Date, modifyVnfInfoMetadata: ModifyVnfInfoMetadata? = nil, updateNsMetadata: UpdateNsMetadata? = nil) { self.createdAt = createdAt + self.instantiateMetadata = instantiateMetadata self.lastModified = lastModified + self.modifyVnfInfoMetadata = modifyVnfInfoMetadata + self.updateNsMetadata = updateNsMetadata } private enum CodingKeys: String, CodingKey { case createdAt = "createdAt" + case instantiateMetadata = "instantiateMetadata" case lastModified = "lastModified" + case modifyVnfInfoMetadata = "modifyVnfInfoMetadata" + case updateNsMetadata = "updateNsMetadata" } } @@ -853,8 +869,10 @@ extension Tnb { public let tags: [String: String]? /// All tasks associated with this operation occurrence. public let tasks: [GetSolNetworkOperationTaskDetails]? + /// Type of the update. Only present if the network operation lcmOperationType is UPDATE. 
+ public let updateType: UpdateSolNetworkType? - public init(arn: String, error: ProblemDetails? = nil, id: String? = nil, lcmOperationType: LcmOperationType? = nil, metadata: GetSolNetworkOperationMetadata? = nil, nsInstanceId: String? = nil, operationState: NsLcmOperationState? = nil, tags: [String: String]? = nil, tasks: [GetSolNetworkOperationTaskDetails]? = nil) { + public init(arn: String, error: ProblemDetails? = nil, id: String? = nil, lcmOperationType: LcmOperationType? = nil, metadata: GetSolNetworkOperationMetadata? = nil, nsInstanceId: String? = nil, operationState: NsLcmOperationState? = nil, tags: [String: String]? = nil, tasks: [GetSolNetworkOperationTaskDetails]? = nil, updateType: UpdateSolNetworkType? = nil) { self.arn = arn self.error = error self.id = id @@ -864,6 +882,7 @@ extension Tnb { self.operationState = operationState self.tags = tags self.tasks = tasks + self.updateType = updateType } private enum CodingKeys: String, CodingKey { @@ -876,6 +895,7 @@ extension Tnb { case operationState = "operationState" case tags = "tags" case tasks = "tasks" + case updateType = "updateType" } } @@ -1147,6 +1167,23 @@ extension Tnb { } } + public struct InstantiateMetadata: AWSDecodableShape { + /// The configurable properties used during instantiation. + public let additionalParamsForNs: String? + /// The network service descriptor used for instantiating the network instance. + public let nsdInfoId: String + + public init(additionalParamsForNs: String? = nil, nsdInfoId: String) { + self.additionalParamsForNs = additionalParamsForNs + self.nsdInfoId = nsdInfoId + } + + private enum CodingKeys: String, CodingKey { + case additionalParamsForNs = "additionalParamsForNs" + case nsdInfoId = "nsdInfoId" + } + } + public struct InstantiateSolNetworkInstanceInput: AWSEncodableShape { /// Provides values for the configurable properties. public let additionalParamsForNs: String? @@ -1154,7 +1191,7 @@ extension Tnb { public let dryRun: Bool? 
/// ID of the network instance. public let nsInstanceId: String - /// A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs. + /// A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs. public let tags: [String: String]? public init(additionalParamsForNs: String? = nil, dryRun: Bool? = nil, nsInstanceId: String, tags: [String: String]? = nil) { @@ -1191,7 +1228,7 @@ extension Tnb { public struct InstantiateSolNetworkInstanceOutput: AWSDecodableShape { /// The identifier of the network operation. public let nsLcmOpOccId: String - /// A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs. + /// A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs. public let tags: [String: String]? public init(nsLcmOpOccId: String, tags: [String: String]? = nil) { @@ -1528,8 +1565,10 @@ extension Tnb { public let nsInstanceId: String /// The state of the network operation. 
public let operationState: NsLcmOperationState + /// Type of the update. Only present if the network operation lcmOperationType is UPDATE. + public let updateType: UpdateSolNetworkType? - public init(arn: String, error: ProblemDetails? = nil, id: String, lcmOperationType: LcmOperationType, metadata: ListSolNetworkOperationsMetadata? = nil, nsInstanceId: String, operationState: NsLcmOperationState) { + public init(arn: String, error: ProblemDetails? = nil, id: String, lcmOperationType: LcmOperationType, metadata: ListSolNetworkOperationsMetadata? = nil, nsInstanceId: String, operationState: NsLcmOperationState, updateType: UpdateSolNetworkType? = nil) { self.arn = arn self.error = error self.id = id @@ -1537,6 +1576,7 @@ extension Tnb { self.metadata = metadata self.nsInstanceId = nsInstanceId self.operationState = operationState + self.updateType = updateType } private enum CodingKeys: String, CodingKey { @@ -1547,6 +1587,7 @@ extension Tnb { case metadata = "metadata" case nsInstanceId = "nsInstanceId" case operationState = "operationState" + case updateType = "updateType" } } @@ -1555,10 +1596,13 @@ extension Tnb { public let maxResults: Int? /// The token for the next page of results. public let nextToken: String? + /// Network instance id filter, to retrieve network operations associated to a network instance. + public let nsInstanceId: String? - public init(maxResults: Int? = nil, nextToken: String? = nil) { + public init(maxResults: Int? = nil, nextToken: String? = nil, nsInstanceId: String? 
= nil) { self.maxResults = maxResults self.nextToken = nextToken + self.nsInstanceId = nsInstanceId } public func encode(to encoder: Encoder) throws { @@ -1566,6 +1610,11 @@ extension Tnb { _ = encoder.container(keyedBy: CodingKeys.self) request.encodeQuery(self.maxResults, key: "max_results") request.encodeQuery(self.nextToken, key: "nextpage_opaque_marker") + request.encodeQuery(self.nsInstanceId, key: "nsInstanceId") + } + + public func validate(name: String) throws { + try self.validate(self.nsInstanceId, name: "nsInstanceId", parent: name, pattern: "^ni-[a-f0-9]{17}$") } private enum CodingKeys: CodingKey {} @@ -1576,15 +1625,23 @@ extension Tnb { public let createdAt: Date /// The date that the resource was last modified. public let lastModified: Date + /// The network service descriptor id used for the operation. Only present if the updateType is UPDATE_NS. + public let nsdInfoId: String? + /// The network function id used for the operation. Only present if the updateType is MODIFY_VNF_INFO. + public let vnfInstanceId: String? - public init(createdAt: Date, lastModified: Date) { + public init(createdAt: Date, lastModified: Date, nsdInfoId: String? = nil, vnfInstanceId: String? = nil) { self.createdAt = createdAt self.lastModified = lastModified + self.nsdInfoId = nsdInfoId + self.vnfInstanceId = vnfInstanceId } private enum CodingKeys: String, CodingKey { case createdAt = "createdAt" case lastModified = "lastModified" + case nsdInfoId = "nsdInfoId" + case vnfInstanceId = "vnfInstanceId" } } @@ -1751,6 +1808,23 @@ extension Tnb { } } + public struct ModifyVnfInfoMetadata: AWSDecodableShape { + /// The configurable properties used during update of the network function instance. + public let vnfConfigurableProperties: String + /// The network function instance that was updated in the network instance. 
+ public let vnfInstanceId: String + + public init(vnfConfigurableProperties: String, vnfInstanceId: String) { + self.vnfConfigurableProperties = vnfConfigurableProperties + self.vnfInstanceId = vnfInstanceId + } + + private enum CodingKeys: String, CodingKey { + case vnfConfigurableProperties = "vnfConfigurableProperties" + case vnfInstanceId = "vnfInstanceId" + } + } + public struct NetworkArtifactMeta: AWSDecodableShape { /// Lists network package overrides. public let overrides: [ToscaOverride]? @@ -1972,7 +2046,7 @@ extension Tnb { public struct TerminateSolNetworkInstanceInput: AWSEncodableShape { /// ID of the network instance. public let nsInstanceId: String - /// A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs. + /// A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs. public let tags: [String: String]? public init(nsInstanceId: String, tags: [String: String]? = nil) { @@ -2004,7 +2078,7 @@ extension Tnb { public struct TerminateSolNetworkInstanceOutput: AWSDecodableShape { /// The identifier of the network operation. public let nsLcmOpOccId: String? - /// A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs. + /// A tag is a label that you assign to an Amazon Web Services resource. 
Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs. public let tags: [String: String]? public init(nsLcmOpOccId: String? = nil, tags: [String: String]? = nil) { @@ -2068,6 +2142,23 @@ extension Tnb { public init() {} } + public struct UpdateNsMetadata: AWSDecodableShape { + /// The configurable properties used during update. + public let additionalParamsForNs: String? + /// The network service descriptor used for updating the network instance. + public let nsdInfoId: String + + public init(additionalParamsForNs: String? = nil, nsdInfoId: String) { + self.additionalParamsForNs = additionalParamsForNs + self.nsdInfoId = nsdInfoId + } + + private enum CodingKeys: String, CodingKey { + case additionalParamsForNs = "additionalParamsForNs" + case nsdInfoId = "nsdInfoId" + } + } + public struct UpdateSolFunctionPackageInput: AWSEncodableShape { /// Operational state of the function package. public let operationalState: OperationalState @@ -2109,19 +2200,22 @@ extension Tnb { } public struct UpdateSolNetworkInstanceInput: AWSEncodableShape { - /// Identifies the network function information parameters and/or the configurable properties of the network function to be modified. + /// Identifies the network function information parameters and/or the configurable properties of the network function to be modified. Include this property only if the update type is MODIFY_VNF_INFORMATION. public let modifyVnfInfoData: UpdateSolNetworkModify? /// ID of the network instance. public let nsInstanceId: String - /// A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. 
Use tags to search and filter your resources or track your Amazon Web Services costs. + /// A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs. public let tags: [String: String]? - /// The type of update. + /// Identifies the network service descriptor and the configurable properties of the descriptor, to be used for the update. Include this property only if the update type is UPDATE_NS. + public let updateNs: UpdateSolNetworkServiceData? + /// The type of update. Use the MODIFY_VNF_INFORMATION update type, to update a specific network function configuration, in the network instance. Use the UPDATE_NS update type, to update the network instance to a new network service descriptor. public let updateType: UpdateSolNetworkType - public init(modifyVnfInfoData: UpdateSolNetworkModify? = nil, nsInstanceId: String, tags: [String: String]? = nil, updateType: UpdateSolNetworkType) { + public init(modifyVnfInfoData: UpdateSolNetworkModify? = nil, nsInstanceId: String, tags: [String: String]? = nil, updateNs: UpdateSolNetworkServiceData? 
= nil, updateType: UpdateSolNetworkType) { self.modifyVnfInfoData = modifyVnfInfoData self.nsInstanceId = nsInstanceId self.tags = tags + self.updateNs = updateNs self.updateType = updateType } @@ -2131,6 +2225,7 @@ extension Tnb { try container.encodeIfPresent(self.modifyVnfInfoData, forKey: .modifyVnfInfoData) request.encodePath(self.nsInstanceId, key: "nsInstanceId") try container.encodeIfPresent(self.tags, forKey: .tags) + try container.encodeIfPresent(self.updateNs, forKey: .updateNs) try container.encode(self.updateType, forKey: .updateType) } @@ -2142,11 +2237,13 @@ extension Tnb { try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.updateNs?.validate(name: "\(name).updateNs") } private enum CodingKeys: String, CodingKey { case modifyVnfInfoData = "modifyVnfInfoData" case tags = "tags" + case updateNs = "updateNs" case updateType = "updateType" } } @@ -2154,7 +2251,7 @@ extension Tnb { public struct UpdateSolNetworkInstanceOutput: AWSDecodableShape { /// The identifier of the network operation. public let nsLcmOpOccId: String? - /// A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs. + /// A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs. public let tags: [String: String]? public init(nsLcmOpOccId: String? = nil, tags: [String: String]? 
= nil) { @@ -2229,6 +2326,27 @@ extension Tnb { } } + public struct UpdateSolNetworkServiceData: AWSEncodableShape { + /// Values for the configurable properties declared in the network service descriptor. + public let additionalParamsForNs: String? + /// ID of the network service descriptor. + public let nsdInfoId: String + + public init(additionalParamsForNs: String? = nil, nsdInfoId: String) { + self.additionalParamsForNs = additionalParamsForNs + self.nsdInfoId = nsdInfoId + } + + public func validate(name: String) throws { + try self.validate(self.nsdInfoId, name: "nsdInfoId", parent: name, pattern: "^np-[a-f0-9]{17}$") + } + + private enum CodingKeys: String, CodingKey { + case additionalParamsForNs = "additionalParamsForNs" + case nsdInfoId = "nsdInfoId" + } + } + public struct ValidateSolFunctionPackageContentInput: AWSEncodableShape { /// Function package content type. public let contentType: PackageContentType? diff --git a/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift b/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift index 38832a7be1..d379009a15 100644 --- a/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift +++ b/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift @@ -280,7 +280,7 @@ public struct WorkSpaces: AWSService { ) } - /// Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles. User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core. + /// Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. 
Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles. User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core. Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing? @Sendable public func createWorkspaces(_ input: CreateWorkspacesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateWorkspacesResult { return try await self.client.execute( diff --git a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift index 2a119c8fa8..6ec3b9583b 100644 --- a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift +++ b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift @@ -413,6 +413,7 @@ extension WorkSpaces { case byolGraphics = "BYOL_GRAPHICS" case byolGraphicsG4Dn = "BYOL_GRAPHICS_G4DN" case byolGraphicsG4DnByop = "BYOL_GRAPHICS_G4DN_BYOP" + case byolGraphicsG4DnWsp = "BYOL_GRAPHICS_G4DN_WSP" case byolGraphicspro = "BYOL_GRAPHICSPRO" case byolRegular = "BYOL_REGULAR" case byolRegularByop = "BYOL_REGULAR_BYOP" @@ -900,7 +901,7 @@ extension WorkSpaces { } public struct Capacity: AWSEncodableShape { - /// The desired number of user sessions for a multi-session pool. This is not allowed for single-session pools. + /// The desired number of user sessions for the WorkSpaces in the pool. public let desiredUserSessions: Int public init(desiredUserSessions: Int) { @@ -917,11 +918,11 @@ extension WorkSpaces { } public struct CapacityStatus: AWSDecodableShape { - /// The number of user sessions currently being used for pool sessions. This only applies to multi-session pools. + /// The number of user sessions currently being used for your pool. 
public let activeUserSessions: Int - /// The total number of session slots that are available for a pool of WorkSpaces. + /// The total number of user sessions that are available for streaming or are currently streaming in your pool. ActualUserSessions = AvailableUserSessions + ActiveUserSessions public let actualUserSessions: Int - /// The number of user sessions currently being used for pool sessions. This only applies to multi-session pools. + /// The number of user sessions currently available for streaming from your pool. AvailableUserSessions = ActualUserSessions - ActiveUserSessions public let availableUserSessions: Int /// The total number of sessions slots that are either running or pending. This represents the total number of concurrent streaming sessions your pool can support in a steady state. public let desiredUserSessions: Int @@ -3544,7 +3545,7 @@ extension WorkSpaces { } public struct ImportWorkspaceImageRequest: AWSEncodableShape { - /// If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11 BYOL images. For more information about subscribing to Office for BYOL images, see Bring Your Own Windows Desktop Licenses. Although this parameter is an array, only one item is allowed at this time. Windows 11 only supports Microsoft_Office_2019. + /// If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11 BYOL images. For more information about subscribing to Office for BYOL images, see Bring Your Own Windows Desktop Licenses. Although this parameter is an array, only one item is allowed at this time. During the image import process, non-GPU WSP WorkSpaces with Windows 11 support only Microsoft_Office_2019. GPU WSP WorkSpaces with Windows 11 do not support Office installation. public let applications: [Application]? /// The identifier of the EC2 image. public let ec2ImageId: String @@ -5742,7 +5743,7 @@ extension WorkSpaces { public let protocols: [`Protocol`]? 
/// The size of the root volume. For important information about how to modify the size of the root and user volumes, see Modify a WorkSpace. public let rootVolumeSizeGib: Int? - /// The running mode. For more information, see Manage the WorkSpace Running Mode. The MANUAL value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. + /// The running mode. For more information, see Manage the WorkSpace Running Mode. The MANUAL value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing? public let runningMode: RunningMode? /// The time after a user logs off when WorkSpaces are automatically stopped. Configured in 60-minute intervals. public let runningModeAutoStopTimeoutInMinutes: Int? diff --git a/models/amplify.json b/models/amplify.json index b3950ef5f8..f60c4a11cf 100644 --- a/models/amplify.json +++ b/models/amplify.json @@ -1204,6 +1204,12 @@ "traits": { "smithy.api#documentation": "\n

This is for internal use.

\n
\n

The Amplify service uses this parameter to specify the authentication protocol to use\n to access the Git repository for an Amplify app. Amplify specifies TOKEN\n for a GitHub repository, SIGV4 for an Amazon Web Services CodeCommit\n repository, and SSH for GitLab and Bitbucket repositories.

" } + }, + "cacheConfig": { + "target": "com.amazonaws.amplify#CacheConfig", + "traits": { + "smithy.api#documentation": "

The cache configuration for the Amplify app. If you don't specify the\n cache configuration type, Amplify uses the default\n AMPLIFY_MANAGED setting.

" + } } }, "traits": { @@ -1761,13 +1767,45 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.amplify#CacheConfig": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.amplify#CacheConfigType", + "traits": { + "smithy.api#documentation": "

The type of cache configuration to use for an Amplify app.

\n

The AMPLIFY_MANAGED cache configuration automatically applies an\n optimized cache configuration for your app based on its platform, routing rules, and\n rewrite rules. This is the default setting.

\n

The AMPLIFY_MANAGED_NO_COOKIES cache configuration type is the same as AMPLIFY_MANAGED, except that it excludes all cookies from the cache key.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the cache configuration for an Amplify app.

\n

For more\n information about how Amplify applies an optimal cache configuration for\n your app based on the type of content that is being served, see Managing cache configuration in the Amplify User\n guide.

" + } + }, + "com.amazonaws.amplify#CacheConfigType": { + "type": "enum", + "members": { + "AMPLIFY_MANAGED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AMPLIFY_MANAGED" + } + }, + "AMPLIFY_MANAGED_NO_COOKIES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AMPLIFY_MANAGED_NO_COOKIES" + } + } + } + }, "com.amazonaws.amplify#Certificate": { "type": "structure", "members": { "type": { "target": "com.amazonaws.amplify#CertificateType", "traits": { - "smithy.api#documentation": "

The type of SSL/TLS certificate that you want to use.

\n

Specify AMPLIFY_MANAGED to use the default certificate that Amplify\n provisions for you.

\n

Specify CUSTOM to use your own certificate that you have already added to\n Certificate Manager in your Amazon Web Services account. Make sure you request (or\n import) the certificate in the US East (N. Virginia) Region (us-east-1). For more\n information about using ACM, see Importing certificates into\n Certificate Manager in the ACM User\n guide .

", + "smithy.api#documentation": "

The type of SSL/TLS certificate that you want to use.

\n

Specify AMPLIFY_MANAGED to use the default certificate that Amplify\n provisions for you.

\n

Specify CUSTOM to use your own certificate that you have already added to\n Certificate Manager in your Amazon Web Services account. Make sure you request (or\n import) the certificate in the US East (N. Virginia) Region (us-east-1). For more\n information about using ACM, see Importing certificates into\n Certificate Manager in the ACM User\n guide.

", "smithy.api#required": {} } }, @@ -2035,6 +2073,12 @@ "traits": { "smithy.api#documentation": "

The automated branch creation configuration for an Amplify app.

" } + }, + "cacheConfig": { + "target": "com.amazonaws.amplify#CacheConfig", + "traits": { + "smithy.api#documentation": "

The cache configuration for the Amplify app.

" + } } }, "traits": { @@ -6161,6 +6205,12 @@ "traits": { "smithy.api#documentation": "

The personal access token for a GitHub repository for an Amplify app. The personal\n access token is used to authorize access to a GitHub repository using the Amplify GitHub\n App. The token is not stored.

\n

Use accessToken for GitHub repositories only. To authorize access to a\n repository provider such as Bitbucket or CodeCommit, use oauthToken.

\n

You must specify either accessToken or oauthToken when you\n update an app.

\n

Existing Amplify apps deployed from a GitHub repository using OAuth continue to work\n with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub\n App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the\n Amplify User Guide .

" } + }, + "cacheConfig": { + "target": "com.amazonaws.amplify#CacheConfig", + "traits": { + "smithy.api#documentation": "

The cache configuration for the Amplify app.

" + } } }, "traits": { diff --git a/models/appintegrations.json b/models/appintegrations.json index 91517580ea..1b334a7a7f 100644 --- a/models/appintegrations.json +++ b/models/appintegrations.json @@ -52,6 +52,9 @@ { "target": "com.amazonaws.appintegrations#CreateDataIntegration" }, + { + "target": "com.amazonaws.appintegrations#CreateDataIntegrationAssociation" + }, { "target": "com.amazonaws.appintegrations#CreateEventIntegration" }, @@ -106,6 +109,9 @@ { "target": "com.amazonaws.appintegrations#UpdateDataIntegration" }, + { + "target": "com.amazonaws.appintegrations#UpdateDataIntegrationAssociation" + }, { "target": "com.amazonaws.appintegrations#UpdateEventIntegration" } @@ -115,14 +121,14 @@ "sdkId": "AppIntegrations", "arnNamespace": "app-integrations", "cloudFormationName": "AppIntegrations", - "cloudTrailEventSource": "appintegrations.amazonaws.com", + "cloudTrailEventSource": "app-integrations.amazonaws.com", "endpointPrefix": "app-integrations" }, "aws.auth#sigv4": { "name": "app-integrations" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

The Amazon AppIntegrations service enables you to configure and reuse connections to external\n applications.

\n

For information about how you can use external applications with Amazon Connect, see\n Set up pre-built\n integrations and Deliver information to agents\n using Amazon Connect Wisdom in the Amazon Connect Administrator\n Guide.

", + "smithy.api#documentation": "\n

The Amazon AppIntegrations service enables you to configure and reuse connections to external\n applications.

\n

For information about how you can use external applications with Amazon Connect, see\n the following topics in the Amazon Connect Administrator\n Guide:

\n ", "smithy.api#title": "Amazon AppIntegrations Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1092,7 +1098,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release and subject to change.

\n

Creates and persists an Application resource.

", + "smithy.api#documentation": "

Creates and persists an Application resource.

", "smithy.api#examples": [ { "title": "To create an application", @@ -1249,6 +1255,113 @@ } } }, + "com.amazonaws.appintegrations#CreateDataIntegrationAssociation": { + "type": "operation", + "input": { + "target": "com.amazonaws.appintegrations#CreateDataIntegrationAssociationRequest" + }, + "output": { + "target": "com.amazonaws.appintegrations#CreateDataIntegrationAssociationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.appintegrations#AccessDeniedException" + }, + { + "target": "com.amazonaws.appintegrations#InternalServiceError" + }, + { + "target": "com.amazonaws.appintegrations#InvalidRequestException" + }, + { + "target": "com.amazonaws.appintegrations#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.appintegrations#ResourceQuotaExceededException" + }, + { + "target": "com.amazonaws.appintegrations#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates and persists a DataIntegrationAssociation resource.

", + "smithy.api#http": { + "method": "POST", + "uri": "/dataIntegrations/{DataIntegrationIdentifier}/associations", + "code": 200 + } + } + }, + "com.amazonaws.appintegrations#CreateDataIntegrationAssociationRequest": { + "type": "structure", + "members": { + "DataIntegrationIdentifier": { + "target": "com.amazonaws.appintegrations#Identifier", + "traits": { + "smithy.api#documentation": "

A unique identifier for the DataIntegration.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "ClientId": { + "target": "com.amazonaws.appintegrations#ClientId", + "traits": { + "smithy.api#documentation": "

The identifier for the client that is associated with the DataIntegration\n association.

" + } + }, + "ObjectConfiguration": { + "target": "com.amazonaws.appintegrations#ObjectConfiguration" + }, + "DestinationURI": { + "target": "com.amazonaws.appintegrations#DestinationURI", + "traits": { + "smithy.api#documentation": "

The URI of the data destination.

" + } + }, + "ClientAssociationMetadata": { + "target": "com.amazonaws.appintegrations#ClientAssociationMetadata", + "traits": { + "smithy.api#documentation": "

The mapping of metadata to be extracted from the data.

" + } + }, + "ClientToken": { + "target": "com.amazonaws.appintegrations#IdempotencyToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the Amazon Web Services\n SDK populates this field. For more information about idempotency, see\n Making retries safe with idempotent APIs.

", + "smithy.api#idempotencyToken": {} + } + }, + "ExecutionConfiguration": { + "target": "com.amazonaws.appintegrations#ExecutionConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration for how the files should be pulled from the source.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.appintegrations#CreateDataIntegrationAssociationResponse": { + "type": "structure", + "members": { + "DataIntegrationAssociationId": { + "target": "com.amazonaws.appintegrations#UUID", + "traits": { + "smithy.api#documentation": "

A unique identifier for the DataIntegrationAssociation.

" + } + }, + "DataIntegrationArn": { + "target": "com.amazonaws.appintegrations#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the DataIntegration.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.appintegrations#CreateDataIntegrationRequest": { "type": "structure", "members": { @@ -1268,15 +1381,14 @@ "KmsKey": { "target": "com.amazonaws.appintegrations#NonBlankString", "traits": { - "smithy.api#documentation": "

The KMS key for the DataIntegration.

", + "smithy.api#documentation": "

The KMS key ARN for the DataIntegration.

", "smithy.api#required": {} } }, "SourceURI": { "target": "com.amazonaws.appintegrations#SourceURI", "traits": { - "smithy.api#documentation": "

The URI of the data source.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The URI of the data source.

" } }, "ScheduleConfig": { @@ -1345,7 +1457,7 @@ "KmsKey": { "target": "com.amazonaws.appintegrations#NonBlankString", "traits": { - "smithy.api#documentation": "

The KMS key for the DataIntegration.

" + "smithy.api#documentation": "

The KMS key ARN for the DataIntegration.

" } }, "SourceURI": { @@ -1508,6 +1620,21 @@ "traits": { "smithy.api#documentation": "

The identifier for the client that is associated with the DataIntegration\n association.

" } + }, + "DestinationURI": { + "target": "com.amazonaws.appintegrations#DestinationURI", + "traits": { + "smithy.api#documentation": "

The URI of the data destination.

" + } + }, + "LastExecutionStatus": { + "target": "com.amazonaws.appintegrations#LastExecutionStatus", + "traits": { + "smithy.api#documentation": "

The execution status of the last job.

" + } + }, + "ExecutionConfiguration": { + "target": "com.amazonaws.appintegrations#ExecutionConfiguration" } }, "traits": { @@ -1590,7 +1717,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the Application. Only Applications that don't have any Application Associations can be deleted.

", + "smithy.api#documentation": "

Deletes the Application. Only Applications that don't have any Application Associations\n can be deleted.

", "smithy.api#examples": [ { "title": "To delete an application", @@ -1755,6 +1882,16 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.appintegrations#DestinationURI": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 + }, + "smithy.api#pattern": "^(\\w+\\:\\/\\/[\\w.-]+[\\w/!@#+=.-]+$)|(\\w+\\:\\/\\/[\\w.-]+[\\w/!@#+=.-]+[\\w/!@#+=.-]+[\\w/!@#+=.,-]+$)$" + } + }, "com.amazonaws.appintegrations#DuplicateResourceException": { "type": "structure", "members": { @@ -1935,6 +2072,67 @@ "smithy.api#pattern": "^[a-zA-Z0-9\\/\\._\\-]+::[a-zA-Z0-9\\/\\._\\-]+(?:\\*)?$" } }, + "com.amazonaws.appintegrations#ExecutionConfiguration": { + "type": "structure", + "members": { + "ExecutionMode": { + "target": "com.amazonaws.appintegrations#ExecutionMode", + "traits": { + "smithy.api#documentation": "

The mode for data import/export execution.

", + "smithy.api#required": {} + } + }, + "OnDemandConfiguration": { + "target": "com.amazonaws.appintegrations#OnDemandConfiguration" + }, + "ScheduleConfiguration": { + "target": "com.amazonaws.appintegrations#ScheduleConfiguration" + } + }, + "traits": { + "smithy.api#documentation": "

The configuration for how the files should be pulled from the source.

" + } + }, + "com.amazonaws.appintegrations#ExecutionMode": { + "type": "enum", + "members": { + "ON_DEMAND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ON_DEMAND" + } + }, + "SCHEDULED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SCHEDULED" + } + } + } + }, + "com.amazonaws.appintegrations#ExecutionStatus": { + "type": "enum", + "members": { + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, "com.amazonaws.appintegrations#ExternalUrlConfig": { "type": "structure", "members": { @@ -2046,7 +2244,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release and subject to change.

\n

Get an Application resource.

", + "smithy.api#documentation": "

Get an Application resource.

", "smithy.api#examples": [ { "title": "To get an application", @@ -2249,13 +2447,13 @@ "Description": { "target": "com.amazonaws.appintegrations#Description", "traits": { - "smithy.api#documentation": "

The KMS key for the DataIntegration.

" + "smithy.api#documentation": "

The KMS key ARN for the DataIntegration.

" } }, "KmsKey": { "target": "com.amazonaws.appintegrations#NonBlankString", "traits": { - "smithy.api#documentation": "

The KMS key for the DataIntegration.

" + "smithy.api#documentation": "

The KMS key ARN for the DataIntegration.

" } }, "SourceURI": { @@ -2433,6 +2631,26 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.appintegrations#LastExecutionStatus": { + "type": "structure", + "members": { + "ExecutionStatus": { + "target": "com.amazonaws.appintegrations#ExecutionStatus", + "traits": { + "smithy.api#documentation": "

The job status enum string.

" + } + }, + "StatusMessage": { + "target": "com.amazonaws.appintegrations#NonBlankString", + "traits": { + "smithy.api#documentation": "

The status message of a job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The execution status of the last job.

" + } + }, "com.amazonaws.appintegrations#ListApplicationAssociations": { "type": "operation", "input": { @@ -2565,7 +2783,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release and subject to change.

\n

Lists applications in the account.

", + "smithy.api#documentation": "

Lists applications in the account.

", "smithy.api#examples": [ { "title": "To list applications in the account", @@ -3114,6 +3332,27 @@ "smithy.api#documentation": "

The configuration for what data should be pulled from the source.

" } }, + "com.amazonaws.appintegrations#OnDemandConfiguration": { + "type": "structure", + "members": { + "StartTime": { + "target": "com.amazonaws.appintegrations#NonBlankString", + "traits": { + "smithy.api#documentation": "

The start time for data pull from the source as a Unix/epoch string in\n milliseconds.

", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "com.amazonaws.appintegrations#NonBlankString", + "traits": { + "smithy.api#documentation": "

The end time for data pull from the source as a Unix/epoch string in\n milliseconds.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The start and end time for data pull from the source.

" + } + }, "com.amazonaws.appintegrations#Permission": { "type": "string", "traits": { @@ -3526,7 +3765,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release and subject to change.

\n

Updates and persists an Application resource.

", + "smithy.api#documentation": "

Updates and persists an Application resource.

", "smithy.api#examples": [ { "title": "To update an application", @@ -3644,6 +3883,78 @@ } } }, + "com.amazonaws.appintegrations#UpdateDataIntegrationAssociation": { + "type": "operation", + "input": { + "target": "com.amazonaws.appintegrations#UpdateDataIntegrationAssociationRequest" + }, + "output": { + "target": "com.amazonaws.appintegrations#UpdateDataIntegrationAssociationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.appintegrations#AccessDeniedException" + }, + { + "target": "com.amazonaws.appintegrations#InternalServiceError" + }, + { + "target": "com.amazonaws.appintegrations#InvalidRequestException" + }, + { + "target": "com.amazonaws.appintegrations#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.appintegrations#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates and persists a DataIntegrationAssociation resource.

\n \n

\n Updating a DataIntegrationAssociation with ExecutionConfiguration will rerun the on-demand job.\n

\n
", + "smithy.api#http": { + "method": "PATCH", + "uri": "/dataIntegrations/{DataIntegrationIdentifier}/associations/{DataIntegrationAssociationIdentifier}", + "code": 200 + } + } + }, + "com.amazonaws.appintegrations#UpdateDataIntegrationAssociationRequest": { + "type": "structure", + "members": { + "DataIntegrationIdentifier": { + "target": "com.amazonaws.appintegrations#Identifier", + "traits": { + "smithy.api#documentation": "

A unique identifier for the DataIntegration.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "DataIntegrationAssociationIdentifier": { + "target": "com.amazonaws.appintegrations#Identifier", + "traits": { + "smithy.api#documentation": "

A unique identifier of the DataIntegrationAssociation resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "ExecutionConfiguration": { + "target": "com.amazonaws.appintegrations#ExecutionConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration for how the files should be pulled from the source.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.appintegrations#UpdateDataIntegrationAssociationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.appintegrations#UpdateDataIntegrationRequest": { "type": "structure", "members": { diff --git a/models/application-auto-scaling.json b/models/application-auto-scaling.json index 1cc31a38a7..149c976a81 100644 --- a/models/application-auto-scaling.json +++ b/models/application-auto-scaling.json @@ -2379,6 +2379,18 @@ "traits": { "smithy.api#enumValue": "WorkSpacesAverageUserSessionsCapacityUtilization" } + }, + "SageMakerInferenceComponentConcurrentRequestsPerCopyHighResolution": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SageMakerInferenceComponentConcurrentRequestsPerCopyHighResolution" + } + }, + "SageMakerVariantConcurrentRequestsPerModelHighResolution": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SageMakerVariantConcurrentRequestsPerModelHighResolution" + } } } }, diff --git a/models/application-signals.json b/models/application-signals.json index b292b17c31..53a60d683e 100644 --- a/models/application-signals.json +++ b/models/application-signals.json @@ -88,7 +88,7 @@ "aws.endpoints#dualStackOnlyEndpoints": {}, "aws.endpoints#standardRegionalEndpoints": {}, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "\n

This is a Preview release of the Application Signals API Reference. Operations and parameters are subject to change before the general availability\n release.

\n
\n

Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. \n It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. \n The application-centric view provides you with unified visibility across your applications, services, and \n dependencies, so you can proactively monitor and efficiently triage any issues that may arise, \n ensuring optimal customer experience.

\n

Application Signals provides the following benefits:

\n
    \n
  • \n

    Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors.

    \n
  • \n
  • \n

    Create and monitor service level objectives (SLOs).

    \n
  • \n
  • \n

    See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity.

    \n
  • \n
", + "smithy.api#documentation": "

Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. \n It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. \n The application-centric view provides you with unified visibility across your applications, services, and \n dependencies, so you can proactively monitor and efficiently triage any issues that may arise, \n ensuring optimal customer experience.

\n

Application Signals provides the following benefits:

\n
    \n
  • \n

    Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors.

    \n
  • \n
  • \n

    Create and monitor service level objectives (SLOs).

    \n
  • \n
  • \n

    See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity.

    \n
  • \n
\n

Application Signals works with CloudWatch RUM, CloudWatch Synthetics canaries, and Amazon Web Services Service Catalog AppRegistry, to display your client pages, Synthetics canaries, \n and application names within dashboards and maps.

", "smithy.api#title": "Amazon CloudWatch Application Signals", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -842,7 +842,7 @@ "StartTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

\n

Your requested start time will be rounded to the nearest hour.

", "smithy.api#httpQuery": "StartTime", "smithy.api#required": {} } @@ -850,7 +850,7 @@ "EndTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

\n

Your requested end time will be rounded to the nearest hour.

", "smithy.api#httpQuery": "EndTime", "smithy.api#required": {} } @@ -940,16 +940,22 @@ "StartTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The start time of the data included in the response. In a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057.

", + "smithy.api#documentation": "

The start time of the data included in the response. In a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057.

\n

This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour.

", "smithy.api#required": {} } }, "EndTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The end time of the data included in the response. In a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057.

", + "smithy.api#documentation": "

The end time of the data included in the response. In a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057.

\n

This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour.

", "smithy.api#required": {} } + }, + "LogGroupReferences": { + "target": "com.amazonaws.applicationsignals#LogGroupReferences", + "traits": { + "smithy.api#documentation": "

An array of string-to-string maps that each contain information about one log group associated with this service. Each \n string-to-string map includes the following fields:

\n
    \n
  • \n

    \n \"Type\": \"AWS::Resource\"\n

    \n
  • \n
  • \n

    \n \"ResourceType\": \"AWS::Logs::LogGroup\"\n

    \n
  • \n
  • \n

    \n \"Identifier\": \"name-of-log-group\"\n

    \n
  • \n
" + } } }, "traits": { @@ -1056,7 +1062,7 @@ "StartTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

\n

Your requested start time will be rounded to the nearest hour.

", "smithy.api#httpQuery": "StartTime", "smithy.api#required": {} } @@ -1064,7 +1070,7 @@ "EndTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

\n

Your requested end time will be rounded to the nearest hour.

", "smithy.api#httpQuery": "EndTime", "smithy.api#required": {} } @@ -1111,14 +1117,14 @@ "StartTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

\n

This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour.

", "smithy.api#required": {} } }, "EndTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

\n

This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour.

", "smithy.api#required": {} } }, @@ -1178,7 +1184,7 @@ "StartTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

\n

Your requested start time will be rounded to the nearest hour.

", "smithy.api#httpQuery": "StartTime", "smithy.api#required": {} } @@ -1186,7 +1192,7 @@ "EndTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

\n

Your requested end time will be rounded to the nearest hour.

", "smithy.api#httpQuery": "EndTime", "smithy.api#required": {} } @@ -1233,14 +1239,14 @@ "StartTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

\n

This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour.

", "smithy.api#required": {} } }, "EndTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

\n

This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour.

", "smithy.api#required": {} } }, @@ -1406,7 +1412,7 @@ "StartTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

\n

Your requested start time will be rounded to the nearest hour.

", "smithy.api#httpQuery": "StartTime", "smithy.api#required": {} } @@ -1414,7 +1420,7 @@ "EndTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

\n

Your requested end time will be rounded to the nearest hour.

", "smithy.api#httpQuery": "EndTime", "smithy.api#required": {} } @@ -1452,14 +1458,14 @@ "StartTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

\n

This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour.

", "smithy.api#required": {} } }, "EndTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

\n

This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour.

", "smithy.api#required": {} } }, @@ -1519,7 +1525,7 @@ "StartTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

\n

Your requested start time will be rounded to the nearest hour.

", "smithy.api#httpQuery": "StartTime", "smithy.api#required": {} } @@ -1527,7 +1533,7 @@ "EndTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

\n

Your requested end time will be rounded to the nearest hour.

", "smithy.api#httpQuery": "EndTime", "smithy.api#required": {} } @@ -1567,14 +1573,14 @@ "StartTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

\n

This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour.

", "smithy.api#required": {} } }, "EndTime": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: 1698778057\n

", + "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

\n

This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour.

", "smithy.api#required": {} } }, @@ -1652,6 +1658,12 @@ "smithy.api#output": {} } }, + "com.amazonaws.applicationsignals#LogGroupReferences": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#Attributes" + } + }, "com.amazonaws.applicationsignals#Metric": { "type": "structure", "members": { @@ -1895,7 +1907,7 @@ "ResourceId": { "target": "com.amazonaws.applicationsignals#ResourceId", "traits": { - "smithy.api#documentation": "

Cannot find the resource id.

", + "smithy.api#documentation": "

Can't find the resource id.

", "smithy.api#required": {} } }, @@ -1979,6 +1991,12 @@ "smithy.api#documentation": "

An array of structures that each contain information about one metric associated with this service.

", "smithy.api#required": {} } + }, + "LogGroupReferences": { + "target": "com.amazonaws.applicationsignals#LogGroupReferences", + "traits": { + "smithy.api#documentation": "

An array of string-to-string maps that each contain information about one log group associated with this service. Each \n string-to-string map includes the following fields:

\n
    \n
  • \n

    \n \"Type\": \"AWS::Resource\"\n

    \n
  • \n
  • \n

    \n \"ResourceType\": \"AWS::Logs::LogGroup\"\n

    \n
  • \n
  • \n

    \n \"Identifier\": \"name-of-log-group\"\n

    \n
  • \n
" + } } }, "traits": { diff --git a/models/appstream.json b/models/appstream.json index 210a178f1c..3d58a636ab 100644 --- a/models/appstream.json +++ b/models/appstream.json @@ -2036,7 +2036,7 @@ "target": "com.amazonaws.appstream#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The instance type to use when launching fleet instances. The following instance types are available:

\n
    \n
  • \n

    stream.standard.small

    \n
  • \n
  • \n

    stream.standard.medium

    \n
  • \n
  • \n

    stream.standard.large

    \n
  • \n
  • \n

    stream.standard.xlarge

    \n
  • \n
  • \n

    stream.standard.2xlarge

    \n
  • \n
  • \n

    stream.compute.large

    \n
  • \n
  • \n

    stream.compute.xlarge

    \n
  • \n
  • \n

    stream.compute.2xlarge

    \n
  • \n
  • \n

    stream.compute.4xlarge

    \n
  • \n
  • \n

    stream.compute.8xlarge

    \n
  • \n
  • \n

    stream.memory.large

    \n
  • \n
  • \n

    stream.memory.xlarge

    \n
  • \n
  • \n

    stream.memory.2xlarge

    \n
  • \n
  • \n

    stream.memory.4xlarge

    \n
  • \n
  • \n

    stream.memory.8xlarge

    \n
  • \n
  • \n

    stream.memory.z1d.large

    \n
  • \n
  • \n

    stream.memory.z1d.xlarge

    \n
  • \n
  • \n

    stream.memory.z1d.2xlarge

    \n
  • \n
  • \n

    stream.memory.z1d.3xlarge

    \n
  • \n
  • \n

    stream.memory.z1d.6xlarge

    \n
  • \n
  • \n

    stream.memory.z1d.12xlarge

    \n
  • \n
  • \n

    stream.graphics-design.large

    \n
  • \n
  • \n

    stream.graphics-design.xlarge

    \n
  • \n
  • \n

    stream.graphics-design.2xlarge

    \n
  • \n
  • \n

    stream.graphics-design.4xlarge

    \n
  • \n
  • \n

    stream.graphics-desktop.2xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.2xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.4xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.8xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.12xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.16xlarge

    \n
  • \n
  • \n

    stream.graphics-pro.4xlarge

    \n
  • \n
  • \n

    stream.graphics-pro.8xlarge

    \n
  • \n
  • \n

    stream.graphics-pro.16xlarge

    \n
  • \n
\n

The following instance types are available for Elastic fleets:

\n
    \n
  • \n

    stream.standard.small

    \n
  • \n
  • \n

    stream.standard.medium

    \n
  • \n
  • \n

    stream.standard.large

    \n
  • \n
  • \n

    stream.standard.xlarge

    \n
  • \n
  • \n

    stream.standard.2xlarge

    \n
  • \n
", + "smithy.api#documentation": "

The instance type to use when launching fleet instances. The following instance types are available:

\n
    \n
  • \n

    stream.standard.small

    \n
  • \n
  • \n

    stream.standard.medium

    \n
  • \n
  • \n

    stream.standard.large

    \n
  • \n
  • \n

    stream.standard.xlarge

    \n
  • \n
  • \n

    stream.standard.2xlarge

    \n
  • \n
  • \n

    stream.compute.large

    \n
  • \n
  • \n

    stream.compute.xlarge

    \n
  • \n
  • \n

    stream.compute.2xlarge

    \n
  • \n
  • \n

    stream.compute.4xlarge

    \n
  • \n
  • \n

    stream.compute.8xlarge

    \n
  • \n
  • \n

    stream.memory.large

    \n
  • \n
  • \n

    stream.memory.xlarge

    \n
  • \n
  • \n

    stream.memory.2xlarge

    \n
  • \n
  • \n

    stream.memory.4xlarge

    \n
  • \n
  • \n

    stream.memory.8xlarge

    \n
  • \n
  • \n

    stream.memory.z1d.large

    \n
  • \n
  • \n

    stream.memory.z1d.xlarge

    \n
  • \n
  • \n

    stream.memory.z1d.2xlarge

    \n
  • \n
  • \n

    stream.memory.z1d.3xlarge

    \n
  • \n
  • \n

    stream.memory.z1d.6xlarge

    \n
  • \n
  • \n

    stream.memory.z1d.12xlarge

    \n
  • \n
  • \n

    stream.graphics-design.large

    \n
  • \n
  • \n

    stream.graphics-design.xlarge

    \n
  • \n
  • \n

    stream.graphics-design.2xlarge

    \n
  • \n
  • \n

    stream.graphics-design.4xlarge

    \n
  • \n
  • \n

    stream.graphics-desktop.2xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.2xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.4xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.8xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.12xlarge

    \n
  • \n
  • \n

    stream.graphics.g4dn.16xlarge

    \n
  • \n
  • \n

    stream.graphics.g5.xlarge

    \n
  • \n
  • \n

    stream.graphics.g5.2xlarge

    \n
  • \n
  • \n

    stream.graphics.g5.4xlarge

    \n
  • \n
  • \n

    stream.graphics.g5.8xlarge

    \n
  • \n
  • \n

    stream.graphics.g5.12xlarge

    \n
  • \n
  • \n

    stream.graphics.g5.16xlarge

    \n
  • \n
  • \n

    stream.graphics.g5.24xlarge

    \n
  • \n
  • \n

    stream.graphics-pro.4xlarge

    \n
  • \n
  • \n

    stream.graphics-pro.8xlarge

    \n
  • \n
  • \n

    stream.graphics-pro.16xlarge

    \n
  • \n
\n

The following instance types are available for Elastic fleets:

\n
    \n
  • \n

    stream.standard.small

    \n
  • \n
  • \n

    stream.standard.medium

    \n
  • \n
  • \n

    stream.standard.large

    \n
  • \n
  • \n

    stream.standard.xlarge

    \n
  • \n
  • \n

    stream.standard.2xlarge

    \n
  • \n
", "smithy.api#required": {} } }, @@ -2067,7 +2067,7 @@ "DisconnectTimeoutInSeconds": { "target": "com.amazonaws.appstream#Integer", "traits": { - "smithy.api#documentation": "

The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

\n

Specify a value between 60 and 360000.

" + "smithy.api#documentation": "

The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

\n

Specify a value between 60 and 36000.

" } }, "Description": { @@ -2103,7 +2103,7 @@ "IdleDisconnectTimeoutInSeconds": { "target": "com.amazonaws.appstream#Integer", "traits": { - "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the DisconnectTimeoutInSeconds time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n they try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds elapses, they are\n disconnected.

\n

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

\n \n

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

\n
" + "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the DisconnectTimeoutInSeconds time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n they try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds elapses, they are\n disconnected.

\n

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0.

\n \n

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

\n
" } }, "IamRoleArn": { @@ -2610,6 +2610,106 @@ "smithy.api#output": {} } }, + "com.amazonaws.appstream#CreateThemeForStack": { + "type": "operation", + "input": { + "target": "com.amazonaws.appstream#CreateThemeForStackRequest" + }, + "output": { + "target": "com.amazonaws.appstream#CreateThemeForStackResult" + }, + "errors": [ + { + "target": "com.amazonaws.appstream#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.appstream#InvalidAccountStatusException" + }, + { + "target": "com.amazonaws.appstream#LimitExceededException" + }, + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.appstream#ResourceAlreadyExistsException" + }, + { + "target": "com.amazonaws.appstream#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates custom branding that customizes the appearance of the streaming application catalog page.

" + } + }, + "com.amazonaws.appstream#CreateThemeForStackRequest": { + "type": "structure", + "members": { + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the stack for the theme.

", + "smithy.api#required": {} + } + }, + "FooterLinks": { + "target": "com.amazonaws.appstream#ThemeFooterLinks", + "traits": { + "smithy.api#documentation": "

The links that are displayed in the footer of the streaming application catalog page. These links are helpful resources for users, such as the organization's IT support and product marketing sites.

" + } + }, + "TitleText": { + "target": "com.amazonaws.appstream#ThemeTitleText", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The title that is displayed at the top of the browser tab during users' application streaming sessions.

", + "smithy.api#required": {} + } + }, + "ThemeStyling": { + "target": "com.amazonaws.appstream#ThemeStyling", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The color theme that is applied to website links, text, and buttons. These colors are also applied as accents in the background for the streaming application catalog page.

", + "smithy.api#required": {} + } + }, + "OrganizationLogoS3Location": { + "target": "com.amazonaws.appstream#S3Location", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The organization logo that appears on the streaming application catalog page.

", + "smithy.api#required": {} + } + }, + "FaviconS3Location": { + "target": "com.amazonaws.appstream#S3Location", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The S3 location of the favicon. The favicon enables users to recognize their application streaming site in a browser full of tabs or bookmarks. It is displayed at the top of the browser tab for the application streaming site during users' streaming sessions.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.appstream#CreateThemeForStackResult": { + "type": "structure", + "members": { + "Theme": { + "target": "com.amazonaws.appstream#Theme", + "traits": { + "smithy.api#documentation": "

The theme object that contains the metadata of the custom branding.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.appstream#CreateUpdatedImage": { "type": "operation", "input": { @@ -3337,6 +3437,52 @@ "smithy.api#output": {} } }, + "com.amazonaws.appstream#DeleteThemeForStack": { + "type": "operation", + "input": { + "target": "com.amazonaws.appstream#DeleteThemeForStackRequest" + }, + "output": { + "target": "com.amazonaws.appstream#DeleteThemeForStackResult" + }, + "errors": [ + { + "target": "com.amazonaws.appstream#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.appstream#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes custom branding that customizes the appearance of the streaming application catalog page.

" + } + }, + "com.amazonaws.appstream#DeleteThemeForStackRequest": { + "type": "structure", + "members": { + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the stack for the theme.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.appstream#DeleteThemeForStackResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.appstream#DeleteUsageReportSubscription": { "type": "operation", "input": { @@ -4382,7 +4528,20 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves a list that describes one or more specified stacks, if the stack names are provided. Otherwise, all stacks in the account are described.

" + "smithy.api#documentation": "

Retrieves a list that describes one or more specified stacks, if the stack names are provided. Otherwise, all stacks in the account are described.

", + "smithy.test#smokeTests": [ + { + "id": "DescribeStacksSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.appstream#DescribeStacksRequest": { @@ -4425,6 +4584,56 @@ "smithy.api#output": {} } }, + "com.amazonaws.appstream#DescribeThemeForStack": { + "type": "operation", + "input": { + "target": "com.amazonaws.appstream#DescribeThemeForStackRequest" + }, + "output": { + "target": "com.amazonaws.appstream#DescribeThemeForStackResult" + }, + "errors": [ + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.appstream#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves a list that describes the theme for a specified stack. A theme is custom branding that customizes the appearance of the streaming application catalog page.

" + } + }, + "com.amazonaws.appstream#DescribeThemeForStackRequest": { + "type": "structure", + "members": { + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the stack for the theme.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.appstream#DescribeThemeForStackResult": { + "type": "structure", + "members": { + "Theme": { + "target": "com.amazonaws.appstream#Theme", + "traits": { + "smithy.api#documentation": "

The theme object that contains the metadata of the custom branding.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.appstream#DescribeUsageReportSubscriptions": { "type": "operation", "input": { @@ -5027,6 +5236,23 @@ } } }, + "com.amazonaws.appstream#DynamicAppProvidersEnabled": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.appstream#EmbedHostDomain": { "type": "string", "traits": { @@ -5398,7 +5624,7 @@ "DisconnectTimeoutInSeconds": { "target": "com.amazonaws.appstream#Integer", "traits": { - "smithy.api#documentation": "

The amount of time that a streaming session remains active after users disconnect. If they try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

\n

Specify a value between 60 and 360000.

" + "smithy.api#documentation": "

The amount of time that a streaming session remains active after users disconnect. If they try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

\n

Specify a value between 60 and 36000.

" } }, "State": { @@ -5442,7 +5668,7 @@ "IdleDisconnectTimeoutInSeconds": { "target": "com.amazonaws.appstream#Integer", "traits": { - "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the DisconnectTimeoutInSeconds time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n users try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds elapses, they are\n disconnected.

\n

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

\n \n

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

\n
" + "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the DisconnectTimeoutInSeconds time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n users try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds elapses, they are\n disconnected.

\n

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0.

\n \n

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

\n
" } }, "IamRoleArn": { @@ -5929,6 +6155,30 @@ "traits": { "smithy.api#documentation": "

Describes the errors that are returned when a new image can't be created.

" } + }, + "LatestAppstreamAgentVersion": { + "target": "com.amazonaws.appstream#LatestAppstreamAgentVersion", + "traits": { + "smithy.api#documentation": "

Indicates whether the image is using the latest AppStream 2.0 agent version or not.

" + } + }, + "SupportedInstanceFamilies": { + "target": "com.amazonaws.appstream#StringList", + "traits": { + "smithy.api#documentation": "

The supported instances families that determine which image a customer can use when the customer launches a fleet or image builder. The following instances families are supported:

\n
    \n
  • \n

    General Purpose

    \n
  • \n
  • \n

    Compute Optimized

    \n
  • \n
  • \n

    Memory Optimized

    \n
  • \n
  • \n

    Graphics

    \n
  • \n
  • \n

    Graphics Design

    \n
  • \n
  • \n

    Graphics Pro

    \n
  • \n
  • \n

    Graphics G4

    \n
  • \n
  • \n

    Graphics G5

    \n
  • \n
" + } + }, + "DynamicAppProvidersEnabled": { + "target": "com.amazonaws.appstream#DynamicAppProvidersEnabled", + "traits": { + "smithy.api#documentation": "

Indicates whether dynamic app providers are enabled within an AppStream 2.0 image or not.

" + } + }, + "ImageSharedWithOthers": { + "target": "com.amazonaws.appstream#ImageSharedWithOthers", + "traits": { + "smithy.api#documentation": "

Indicates whether the image is shared with another account ID.

" + } } }, "traits": { @@ -6044,6 +6294,12 @@ "traits": { "smithy.api#documentation": "

The list of virtual private cloud (VPC) interface endpoint objects. Administrators can connect to the image builder only through the specified endpoints.

" } + }, + "LatestAppstreamAgentVersion": { + "target": "com.amazonaws.appstream#LatestAppstreamAgentVersion", + "traits": { + "smithy.api#documentation": "

Indicates whether the image builder is using the latest AppStream 2.0 agent version or not.

" + } } }, "traits": { @@ -6190,6 +6446,23 @@ "smithy.api#documentation": "

Describes the permissions for an image.

" } }, + "com.amazonaws.appstream#ImageSharedWithOthers": { + "type": "enum", + "members": { + "TRUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TRUE" + } + }, + "FALSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FALSE" + } + } + } + }, "com.amazonaws.appstream#ImageState": { "type": "enum", "members": { @@ -6361,6 +6634,23 @@ "target": "com.amazonaws.appstream#LastReportGenerationExecutionError" } }, + "com.amazonaws.appstream#LatestAppstreamAgentVersion": { + "type": "enum", + "members": { + "TRUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TRUE" + } + }, + "FALSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FALSE" + } + } + } + }, "com.amazonaws.appstream#LimitExceededException": { "type": "structure", "members": { @@ -6792,6 +7082,9 @@ { "target": "com.amazonaws.appstream#CreateStreamingURL" }, + { + "target": "com.amazonaws.appstream#CreateThemeForStack" + }, { "target": "com.amazonaws.appstream#CreateUpdatedImage" }, @@ -6831,6 +7124,9 @@ { "target": "com.amazonaws.appstream#DeleteStack" }, + { + "target": "com.amazonaws.appstream#DeleteThemeForStack" + }, { "target": "com.amazonaws.appstream#DeleteUsageReportSubscription" }, @@ -6876,6 +7172,9 @@ { "target": "com.amazonaws.appstream#DescribeStacks" }, + { + "target": "com.amazonaws.appstream#DescribeThemeForStack" + }, { "target": "com.amazonaws.appstream#DescribeUsageReportSubscriptions" }, @@ -6962,6 +7261,9 @@ }, { "target": "com.amazonaws.appstream#UpdateStack" + }, + { + "target": "com.amazonaws.appstream#UpdateThemeForStack" } ], "traits": { @@ -7872,6 +8174,12 @@ "traits": { "smithy.api#enumValue": "AMAZON_LINUX2" } + }, + "RHEL8": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RHEL8" + } } } }, @@ -9114,6 +9422,180 @@ } } }, + "com.amazonaws.appstream#Theme": { + "type": "structure", + "members": { + "StackName": { + "target": 
"com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

The stack that has the custom branding theme.

" + } + }, + "State": { + "target": "com.amazonaws.appstream#ThemeState", + "traits": { + "smithy.api#documentation": "

The state of the theme.

" + } + }, + "ThemeTitleText": { + "target": "com.amazonaws.appstream#ThemeTitleText", + "traits": { + "smithy.api#documentation": "

The browser tab page title.

" + } + }, + "ThemeStyling": { + "target": "com.amazonaws.appstream#ThemeStyling", + "traits": { + "smithy.api#documentation": "

The color that is used for the website links, text, buttons, and catalog page background.

" + } + }, + "ThemeFooterLinks": { + "target": "com.amazonaws.appstream#ThemeFooterLinks", + "traits": { + "smithy.api#documentation": "

The website links that display in the catalog page footer.

" + } + }, + "ThemeOrganizationLogoURL": { + "target": "com.amazonaws.appstream#String", + "traits": { + "smithy.api#documentation": "

The URL of the logo that displays in the catalog page header.

" + } + }, + "ThemeFaviconURL": { + "target": "com.amazonaws.appstream#String", + "traits": { + "smithy.api#documentation": "

The URL of the icon that displays at the top of a user's browser tab during streaming sessions.

" + } + }, + "CreatedTime": { + "target": "com.amazonaws.appstream#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the theme was created.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The custom branding theme, which might include a custom logo, website links, and other branding to display to users.

" + } + }, + "com.amazonaws.appstream#ThemeAttribute": { + "type": "enum", + "members": { + "FOOTER_LINKS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FOOTER_LINKS" + } + } + } + }, + "com.amazonaws.appstream#ThemeAttributes": { + "type": "list", + "member": { + "target": "com.amazonaws.appstream#ThemeAttribute" + } + }, + "com.amazonaws.appstream#ThemeFooterLink": { + "type": "structure", + "members": { + "DisplayName": { + "target": "com.amazonaws.appstream#ThemeFooterLinkDisplayName", + "traits": { + "smithy.api#documentation": "

The name of the websites that display in the catalog page footer.

" + } + }, + "FooterLinkURL": { + "target": "com.amazonaws.appstream#ThemeFooterLinkURL", + "traits": { + "smithy.api#documentation": "

The URL of the websites that display in the catalog page footer.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The website links that display in the catalog page footer.

" + } + }, + "com.amazonaws.appstream#ThemeFooterLinkDisplayName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 300 + }, + "smithy.api#pattern": "^[-@./#&+\\w\\s]*$" + } + }, + "com.amazonaws.appstream#ThemeFooterLinkURL": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.appstream#ThemeFooterLinks": { + "type": "list", + "member": { + "target": "com.amazonaws.appstream#ThemeFooterLink" + } + }, + "com.amazonaws.appstream#ThemeState": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.appstream#ThemeStyling": { + "type": "enum", + "members": { + "LIGHT_BLUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LIGHT_BLUE" + } + }, + "BLUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BLUE" + } + }, + "PINK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PINK" + } + }, + "RED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RED" + } + } + } + }, + "com.amazonaws.appstream#ThemeTitleText": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 300 + }, + "smithy.api#pattern": "^[-@./#&+\\w\\s]*$" + } + }, "com.amazonaws.appstream#Timestamp": { "type": "timestamp" }, @@ -9642,7 +10124,7 @@ "DisconnectTimeoutInSeconds": { "target": "com.amazonaws.appstream#Integer", "traits": { - "smithy.api#documentation": "

The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

\n

Specify a value between 60 and 360000.

" + "smithy.api#documentation": "

The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

\n

Specify a value between 60 and 36000.

" } }, "DeleteVpcConfig": { @@ -9679,7 +10161,7 @@ "IdleDisconnectTimeoutInSeconds": { "target": "com.amazonaws.appstream#Integer", "traits": { - "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the DisconnectTimeoutInSeconds time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n users try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds elapses, they are\n disconnected.

\n

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

\n \n

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

\n
" + "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the DisconnectTimeoutInSeconds time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n users try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds elapses, they are\n disconnected.

\n

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0.

\n \n

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

\n
" } }, "AttributesToDelete": { @@ -9955,6 +10437,110 @@ "smithy.api#output": {} } }, + "com.amazonaws.appstream#UpdateThemeForStack": { + "type": "operation", + "input": { + "target": "com.amazonaws.appstream#UpdateThemeForStackRequest" + }, + "output": { + "target": "com.amazonaws.appstream#UpdateThemeForStackResult" + }, + "errors": [ + { + "target": "com.amazonaws.appstream#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.appstream#InvalidAccountStatusException" + }, + { + "target": "com.amazonaws.appstream#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.appstream#LimitExceededException" + }, + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.appstream#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates custom branding that customizes the appearance of the streaming application catalog page.

" + } + }, + "com.amazonaws.appstream#UpdateThemeForStackRequest": { + "type": "structure", + "members": { + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the stack for the theme.

", + "smithy.api#required": {} + } + }, + "FooterLinks": { + "target": "com.amazonaws.appstream#ThemeFooterLinks", + "traits": { + "smithy.api#documentation": "

The links that are displayed in the footer of the streaming application catalog page. These links are helpful resources for users, such as the organization's IT support and product marketing sites.

" + } + }, + "TitleText": { + "target": "com.amazonaws.appstream#ThemeTitleText", + "traits": { + "smithy.api#documentation": "

The title that is displayed at the top of the browser tab during users' application streaming sessions.

" + } + }, + "ThemeStyling": { + "target": "com.amazonaws.appstream#ThemeStyling", + "traits": { + "smithy.api#documentation": "

The color theme that is applied to website links, text, and buttons. These colors are also applied as accents in the background for the streaming application catalog page.

" + } + }, + "OrganizationLogoS3Location": { + "target": "com.amazonaws.appstream#S3Location", + "traits": { + "smithy.api#documentation": "

The organization logo that appears on the streaming application catalog page.

" + } + }, + "FaviconS3Location": { + "target": "com.amazonaws.appstream#S3Location", + "traits": { + "smithy.api#documentation": "

The S3 location of the favicon. The favicon enables users to recognize their application streaming site in a browser full of tabs or bookmarks. It is displayed at the top of the browser tab for the application streaming site during users' streaming sessions.

" + } + }, + "State": { + "target": "com.amazonaws.appstream#ThemeState", + "traits": { + "smithy.api#documentation": "

Specifies whether custom branding should be applied to catalog page or not.

" + } + }, + "AttributesToDelete": { + "target": "com.amazonaws.appstream#ThemeAttributes", + "traits": { + "smithy.api#documentation": "

The attributes to delete.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.appstream#UpdateThemeForStackResult": { + "type": "structure", + "members": { + "Theme": { + "target": "com.amazonaws.appstream#Theme", + "traits": { + "smithy.api#documentation": "

The theme object that contains the metadata of the custom branding.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.appstream#UsageReportExecutionErrorCode": { "type": "enum", "members": { @@ -10149,7 +10735,7 @@ "MaximumLength": { "target": "com.amazonaws.appstream#Integer", "traits": { - "smithy.api#documentation": "

Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session.

\n

This can be specified only for the CLIPBOARD_COPY_FROM_LOCAL_DEVICE and CLIPBOARD_COPY_TO_LOCAL_DEVICE actions.

\n

This defaults to 20,971,520 (20 MB) when unspecified and the permission is ENABLED. This can't be specified when the permission is DISABLED.

\n

This can only be specified for AlwaysOn and OnDemand fleets. The attribute is not supported on Elastic fleets.

\n

The value can be between 1 and 20,971,520 (20 MB).

" + "smithy.api#documentation": "

Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session.

\n

This can be specified only for the CLIPBOARD_COPY_FROM_LOCAL_DEVICE and CLIPBOARD_COPY_TO_LOCAL_DEVICE actions.

\n

This defaults to 20,971,520 (20 MB) when unspecified and the permission is ENABLED. This can't be specified when the permission is DISABLED.

\n

The value can be between 1 and 20,971,520 (20 MB).

" } } }, diff --git a/models/appsync.json b/models/appsync.json index 2ad99c15ca..2810fcf07e 100644 --- a/models/appsync.json +++ b/models/appsync.json @@ -6054,6 +6054,12 @@ "method": "GET", "uri": "/v1/apis/{apiId}/apikeys", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "apiKeys", + "pageSize": "maxResults" } } }, @@ -6136,6 +6142,12 @@ "method": "GET", "uri": "/v1/apis/{apiId}/datasources", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "dataSources", + "pageSize": "maxResults" } } }, @@ -6215,6 +6227,12 @@ "method": "GET", "uri": "/v1/domainnames", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "domainNameConfigs", + "pageSize": "maxResults" } } }, @@ -6289,6 +6307,12 @@ "method": "GET", "uri": "/v1/apis/{apiId}/functions", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "functions", + "pageSize": "maxResults" } } }, @@ -6368,6 +6392,12 @@ "method": "GET", "uri": "/v1/apis", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "graphqlApis", + "pageSize": "maxResults" } } }, @@ -6456,6 +6486,12 @@ "method": "GET", "uri": "/v1/apis/{apiId}/types/{typeName}/resolvers", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "resolvers", + "pageSize": "maxResults" } } }, @@ -6487,6 +6523,12 @@ "method": "GET", "uri": "/v1/apis/{apiId}/functions/{functionId}/resolvers", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "resolvers", + "pageSize": "maxResults" } } }, @@ -6636,6 +6678,12 @@ "method": "GET", "uri": "/v1/apis/{apiId}/sourceApiAssociations", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": 
"nextToken", + "outputToken": "nextToken", + "items": "sourceApiAssociationSummaries", + "pageSize": "maxResults" } } }, @@ -6788,6 +6836,12 @@ "method": "GET", "uri": "/v1/apis/{apiId}/types", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "types", + "pageSize": "maxResults" } } }, @@ -6822,6 +6876,12 @@ "method": "GET", "uri": "/v1/mergedApis/{mergedApiIdentifier}/sourceApiAssociations/{associationId}/types", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "types", + "pageSize": "maxResults" } } }, diff --git a/models/auto-scaling.json b/models/auto-scaling.json index d6d9e29ef2..c95f52f510 100644 --- a/models/auto-scaling.json +++ b/models/auto-scaling.json @@ -828,7 +828,7 @@ } }, "VPCZoneIdentifier": { - "target": "com.amazonaws.autoscaling#XmlStringMaxLen2047", + "target": "com.amazonaws.autoscaling#XmlStringMaxLen5000", "traits": { "smithy.api#documentation": "

One or more subnet IDs, if applicable, separated by commas.

" } @@ -3019,7 +3019,7 @@ } }, "VPCZoneIdentifier": { - "target": "com.amazonaws.autoscaling#XmlStringMaxLen2047", + "target": "com.amazonaws.autoscaling#XmlStringMaxLen5000", "traits": { "smithy.api#documentation": "

A comma-separated list of subnet IDs for a virtual private cloud (VPC) where instances\n in the Auto Scaling group can be created. If you specify VPCZoneIdentifier with\n AvailabilityZones, the subnets that you specify must reside in those\n Availability Zones.

" } @@ -10884,7 +10884,7 @@ } }, "VPCZoneIdentifier": { - "target": "com.amazonaws.autoscaling#XmlStringMaxLen2047", + "target": "com.amazonaws.autoscaling#XmlStringMaxLen5000", "traits": { "smithy.api#documentation": "

A comma-separated list of subnet IDs for a virtual private cloud (VPC). If you specify\n VPCZoneIdentifier with AvailabilityZones, the subnets that\n you specify must reside in those Availability Zones.

" } @@ -11122,6 +11122,16 @@ "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" } }, + "com.amazonaws.autoscaling#XmlStringMaxLen5000": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5000 + }, + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" + } + }, "com.amazonaws.autoscaling#XmlStringMaxLen511": { "type": "string", "traits": { diff --git a/models/batch.json b/models/batch.json index d8b89bb51d..c7f094b52c 100644 --- a/models/batch.json +++ b/models/batch.json @@ -1588,7 +1588,7 @@ } ], "traits": { - "smithy.api#documentation": "

Cancels a job in an Batch job queue. Jobs that are in the\n SUBMITTED\n or\n PENDING\n are\n canceled. A job\n inRUNNABLE remains in RUNNABLE until it reaches the head of the\n job queue. Then the job status is updated to\n FAILED.

\n \n

A PENDING job is canceled after all dependency jobs are completed.\n Therefore, it may take longer than expected to cancel a job in PENDING\n status.

\n

When you try to cancel an array parent job in PENDING, Batch attempts to\n cancel all child jobs. The array parent job is canceled when all child jobs are\n completed.

\n
\n

Jobs that progressed to the STARTING or\n RUNNING state aren't canceled. However, the API operation still succeeds, even\n if no job is canceled. These jobs must be terminated with the TerminateJob\n operation.

", + "smithy.api#documentation": "

Cancels a job in an Batch job queue. Jobs that are in a SUBMITTED, PENDING, or RUNNABLE state are cancelled and the job status is updated to FAILED.

\n \n

A PENDING job is canceled after all dependency jobs are completed.\n Therefore, it may take longer than expected to cancel a job in PENDING\n status.

\n

When you try to cancel an array parent job in PENDING, Batch attempts to\n cancel all child jobs. The array parent job is canceled when all child jobs are\n completed.

\n
\n

Jobs that progressed to the STARTING or\n RUNNING state aren't canceled. However, the API operation still succeeds, even\n if no job is canceled. These jobs must be terminated with the TerminateJob\n operation.

", "smithy.api#examples": [ { "title": "To cancel a job", @@ -1748,6 +1748,12 @@ "traits": { "smithy.api#documentation": "

Unique identifier for the compute environment.

" } + }, + "context": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#documentation": "

Reserved.

" + } } }, "traits": { @@ -2582,6 +2588,12 @@ "traits": { "smithy.api#documentation": "

The details for the Amazon EKS cluster that supports the compute environment.

" } + }, + "context": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#documentation": "

Reserved.

" + } } }, "traits": { @@ -8014,6 +8026,12 @@ "traits": { "smithy.api#documentation": "

Specifies the updated infrastructure update policy for the compute environment. For more\n information about infrastructure updates, see Updating compute environments in\n the Batch User Guide.

" } + }, + "context": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#documentation": "

Reserved.

" + } } }, "traits": { diff --git a/models/bedrock-agent-runtime.json b/models/bedrock-agent-runtime.json index 24ceb9b078..dd66417971 100644 --- a/models/bedrock-agent-runtime.json +++ b/models/bedrock-agent-runtime.json @@ -1696,27 +1696,27 @@ "nodeName": { "target": "com.amazonaws.bedrockagentruntime#NodeName", "traits": { - "smithy.api#documentation": "

A name for the input of the flow input node.

", + "smithy.api#documentation": "

The name of the flow input node that begins the prompt flow.

", "smithy.api#required": {} } }, "nodeOutputName": { "target": "com.amazonaws.bedrockagentruntime#NodeOutputName", "traits": { - "smithy.api#documentation": "

A name for the output of the flow input node.

", + "smithy.api#documentation": "

The name of the output from the flow input node that begins the prompt flow.

", "smithy.api#required": {} } }, "content": { "target": "com.amazonaws.bedrockagentruntime#FlowInputContent", "traits": { - "smithy.api#documentation": "

Contains information about an input into the flow.

", + "smithy.api#documentation": "

Contains information about an input into the prompt flow.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Contains information about an input into the flow and what to do with it.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains information about an input into the prompt flow and where to send it.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#FlowInputContent": { @@ -1725,7 +1725,7 @@ "document": { "target": "smithy.api#Document", "traits": { - "smithy.api#documentation": "

The input for the flow input node.

" + "smithy.api#documentation": "

The input to send to the prompt flow input node.

" } } }, @@ -1752,12 +1752,12 @@ "document": { "target": "smithy.api#Document", "traits": { - "smithy.api#documentation": "

A name for the output of the flow.

" + "smithy.api#documentation": "

The content in the output.

" } } }, "traits": { - "smithy.api#documentation": "

Contains information about the output node.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains information about the content in an output from prompt flow invocation.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#FlowOutputEvent": { @@ -1766,27 +1766,27 @@ "nodeName": { "target": "com.amazonaws.bedrockagentruntime#NodeName", "traits": { - "smithy.api#documentation": "

The name of the node to which input was provided.

", + "smithy.api#documentation": "

The name of the flow output node that the output is from.

", "smithy.api#required": {} } }, "nodeType": { "target": "com.amazonaws.bedrockagentruntime#NodeType", "traits": { - "smithy.api#documentation": "

The type of node to which input was provided.

", + "smithy.api#documentation": "

The type of the node that the output is from.

", "smithy.api#required": {} } }, "content": { "target": "com.amazonaws.bedrockagentruntime#FlowOutputContent", "traits": { - "smithy.api#documentation": "

The output of the node.

", + "smithy.api#documentation": "

The content in the output.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Contains information about an output from flow invoction.

\n

This data type is used in the following API operations:

\n ", + "smithy.api#documentation": "

Contains information about an output from prompt flow invocation.

\n

This data type is used in the following API operations:

\n ", "smithy.api#sensitive": {} } }, @@ -2938,7 +2938,7 @@ "topP": { "target": "com.amazonaws.bedrockagentruntime#TopP", "traits": { - "smithy.api#documentation": "

While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for Top P determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topP to 80, the model only selects the next token from the top 80% of the probability distribution of next tokens.

" + "smithy.api#documentation": "

While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for Top P determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topP to 0.8, the model only selects the next token from the top 80% of the probability distribution of next tokens.

" } }, "topK": { @@ -3334,7 +3334,7 @@ } ], "traits": { - "smithy.api#documentation": "

Invokes an alias of a flow to run the inputs that you specify and return the output of each node as a stream. If there's an error, the error is returned. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Invokes an alias of a flow to run the inputs that you specify and return the output of each node as a stream. If there's an error, the error is returned. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

\n \n

The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeFlow.

\n
", "smithy.api#http": { "code": 200, "method": "POST", @@ -3750,6 +3750,21 @@ } } }, + "com.amazonaws.bedrockagentruntime#Metadata": { + "type": "structure", + "members": { + "usage": { + "target": "com.amazonaws.bedrockagentruntime#Usage", + "traits": { + "smithy.api#documentation": "

Contains details of the foundation model usage.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides details of the foundation model.

", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrockagentruntime#MimeType": { "type": "string" }, @@ -3945,6 +3960,33 @@ "smithy.api#documentation": "

Settings for how the model processes the prompt prior to retrieval and generation.

" } }, + "com.amazonaws.bedrockagentruntime#OrchestrationModelInvocationOutput": { + "type": "structure", + "members": { + "traceId": { + "target": "com.amazonaws.bedrockagentruntime#TraceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the trace.

" + } + }, + "rawResponse": { + "target": "com.amazonaws.bedrockagentruntime#RawResponse", + "traits": { + "smithy.api#documentation": "

Contains details of the raw response from the foundation model output.

" + } + }, + "metadata": { + "target": "com.amazonaws.bedrockagentruntime#Metadata", + "traits": { + "smithy.api#documentation": "

Contains information about the foundation model output.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The foundation model output from the orchestration step.

", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrockagentruntime#OrchestrationTrace": { "type": "union", "members": { @@ -3971,6 +4013,12 @@ "traits": { "smithy.api#documentation": "

The input for the orchestration step.

\n
    \n
  • \n

    The type is ORCHESTRATION.

    \n
  • \n
  • \n

    The text contains the prompt.

    \n
  • \n
  • \n

    The inferenceConfiguration, parserMode, and overrideLambda values are set in the PromptOverrideConfiguration object that was set when the agent was created or updated.

    \n
  • \n
" } + }, + "modelInvocationOutput": { + "target": "com.amazonaws.bedrockagentruntime#OrchestrationModelInvocationOutput", + "traits": { + "smithy.api#documentation": "

Contains information pertaining to the output from the foundation model that is being invoked.

" + } } }, "traits": { @@ -4354,6 +4402,21 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.bedrockagentruntime#RawResponse": { + "type": "structure", + "members": { + "content": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The foundation model's raw output content.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the raw output from the foundation model.

", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrockagentruntime#RepromptResponse": { "type": "structure", "members": { @@ -5641,6 +5704,27 @@ } } }, + "com.amazonaws.bedrockagentruntime#Usage": { + "type": "structure", + "members": { + "inputTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Contains information about the input tokens from the foundation model usage.

" + } + }, + "outputTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Contains information about the output tokens from the foundation model usage.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information of the usage of the foundation model.

", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrockagentruntime#ValidationException": { "type": "structure", "members": { diff --git a/models/bedrock-runtime.json b/models/bedrock-runtime.json index e6963c7808..d73d5b706f 100644 --- a/models/bedrock-runtime.json +++ b/models/bedrock-runtime.json @@ -1051,6 +1051,9 @@ { "target": "com.amazonaws.bedrockruntime#ResourceNotFoundException" }, + { + "target": "com.amazonaws.bedrockruntime#ServiceUnavailableException" + }, { "target": "com.amazonaws.bedrockruntime#ThrottlingException" }, @@ -1059,7 +1062,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sends messages to the specified Amazon Bedrock model. Converse provides\n a consistent interface that works with all models that\n support messages. This allows you to write code once and use it with different models.\n Should a model have unique inference parameters, you can also pass those unique parameters\n to the model.

\n

For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide.\n To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide\n

\n

For example code, see Converse API examples in the Amazon Bedrock User Guide.\n

\n

This operation requires permission for the bedrock:InvokeModel action.

", + "smithy.api#documentation": "

Sends messages to the specified Amazon Bedrock model. Converse provides\n a consistent interface that works with all models that\n support messages. This allows you to write code once and use it with different models.\n If a model has unique inference parameters, you can also pass those unique parameters\n to the model.

\n

Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response.

\n

For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide.\n To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide\n

\n

For example code, see Converse API examples in the Amazon Bedrock User Guide.\n

\n

This operation requires permission for the bedrock:InvokeModel action.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1233,6 +1236,9 @@ { "target": "com.amazonaws.bedrockruntime#ResourceNotFoundException" }, + { + "target": "com.amazonaws.bedrockruntime#ServiceUnavailableException" + }, { "target": "com.amazonaws.bedrockruntime#ThrottlingException" }, @@ -1241,7 +1247,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sends messages to the specified Amazon Bedrock model and returns\n the response in a stream. ConverseStream provides a consistent API\n that works with all Amazon Bedrock models that support messages.\n This allows you to write code once and use it with different models. Should a\n model have unique inference parameters, you can also pass those unique parameters to the\n model.

\n

To find out if a model supports streaming, call GetFoundationModel\n and check the responseStreamingSupported field in the response.

\n

For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide.\n To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide\n

\n

For example code, see Conversation streaming example in the Amazon Bedrock User Guide.\n

\n

This operation requires permission for the bedrock:InvokeModelWithResponseStream action.

", + "smithy.api#documentation": "

Sends messages to the specified Amazon Bedrock model and returns\n the response in a stream. ConverseStream provides a consistent API\n that works with all Amazon Bedrock models that support messages.\n This allows you to write code once and use it with different models. Should a\n model have unique inference parameters, you can also pass those unique parameters to the\n model.

\n

To find out if a model supports streaming, call GetFoundationModel\n and check the responseStreamingSupported field in the response.

\n \n

The CLI doesn't support streaming operations in Amazon Bedrock, including ConverseStream.

\n
\n

Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response.

\n

For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide.\n To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide\n

\n

For example code, see Conversation streaming example in the Amazon Bedrock User Guide.\n

\n

This operation requires permission for the bedrock:InvokeModelWithResponseStream action.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1354,6 +1360,12 @@ "traits": { "smithy.api#documentation": "

The number of requests exceeds the limit. Resubmit your request later.

" } + }, + "serviceUnavailableException": { + "target": "com.amazonaws.bedrockruntime#ServiceUnavailableException", + "traits": { + "smithy.api#documentation": "

The service isn't currently available. Try again later.

" + } } }, "traits": { @@ -2930,6 +2942,9 @@ { "target": "com.amazonaws.bedrockruntime#ServiceQuotaExceededException" }, + { + "target": "com.amazonaws.bedrockruntime#ServiceUnavailableException" + }, { "target": "com.amazonaws.bedrockruntime#ThrottlingException" }, @@ -3072,6 +3087,9 @@ { "target": "com.amazonaws.bedrockruntime#ServiceQuotaExceededException" }, + { + "target": "com.amazonaws.bedrockruntime#ServiceUnavailableException" + }, { "target": "com.amazonaws.bedrockruntime#ThrottlingException" }, @@ -3080,7 +3098,7 @@ } ], "traits": { - "smithy.api#documentation": "

Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream.

\n

To see if a model supports streaming, call GetFoundationModel\n and check the responseStreamingSupported field in the response.

\n \n

The CLI doesn't support InvokeModelWithResponseStream.

\n
\n

For example code, see Invoke model with streaming code\n example in the Amazon Bedrock User Guide.\n

\n

This operation requires permissions to perform the bedrock:InvokeModelWithResponseStream action.

", + "smithy.api#documentation": "

Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream.

\n

To see if a model supports streaming, call GetFoundationModel\n and check the responseStreamingSupported field in the response.

\n \n

The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeModelWithResponseStream.

\n
\n

For example code, see Invoke model with streaming code\n example in the Amazon Bedrock User Guide.\n

\n

This operation requires permissions to perform the bedrock:InvokeModelWithResponseStream action.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -3415,7 +3433,7 @@ "throttlingException": { "target": "com.amazonaws.bedrockruntime#ThrottlingException", "traits": { - "smithy.api#documentation": "

The number or frequency of requests exceeds the limit. Resubmit your request later.

" + "smithy.api#documentation": "

Your request was throttled because of service-wide limitations. Resubmit your request later or in a different region. You can also purchase Provisioned Throughput to increase the rate or number of tokens you can process.

" } }, "modelTimeoutException": { @@ -3423,6 +3441,9 @@ "traits": { "smithy.api#documentation": "

The request took too long to process. Processing time exceeded the model timeout length.

" } + }, + "serviceUnavailableException": { + "target": "com.amazonaws.bedrockruntime#ServiceUnavailableException" } }, "traits": { @@ -3438,11 +3459,24 @@ } }, "traits": { - "smithy.api#documentation": "

The number of requests exceeds the service quota. Resubmit your request later.

", + "smithy.api#documentation": "

Your request exceeds the service quota for your account. You can view your quotas at Viewing service quotas. You can resubmit your request later.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } }, + "com.amazonaws.bedrockruntime#ServiceUnavailableException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.bedrockruntime#NonBlankString" + } + }, + "traits": { + "smithy.api#documentation": "

The service isn't currently available. Try again later.

", + "smithy.api#error": "server", + "smithy.api#httpError": 503 + } + }, "com.amazonaws.bedrockruntime#SpecificToolChoice": { "type": "structure", "members": { @@ -3542,7 +3576,7 @@ } }, "traits": { - "smithy.api#documentation": "

The number of requests exceeds the limit. Resubmit your request later.

", + "smithy.api#documentation": "

Your request was throttled because of service-wide limitations. Resubmit your request later or in a different region. You can also purchase Provisioned Throughput to increase the rate or number of tokens you can process.

", "smithy.api#error": "client", "smithy.api#httpError": 429 } diff --git a/models/bedrock.json b/models/bedrock.json index 77d9d57b78..3bd7a91247 100644 --- a/models/bedrock.json +++ b/models/bedrock.json @@ -14,6 +14,12 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.bedrock#AccountId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9]{12}$" + } + }, "com.amazonaws.bedrock#AmazonBedrockControlPlaneService": { "type": "service", "version": "2023-04-20", @@ -27,6 +33,9 @@ { "target": "com.amazonaws.bedrock#LoggingResource" }, + { + "target": "com.amazonaws.bedrock#ModelCopyResource" + }, { "target": "com.amazonaws.bedrock#ModelResource" }, @@ -863,7 +872,7 @@ } ], "traits": { - "smithy.api#documentation": "

API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and model evaluation jobs that use human workers. To learn more about the requirements for creating a model evaluation job see, Model evaluations.

", + "smithy.api#documentation": "

API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and model evaluation jobs that use human workers. To learn more about the requirements for creating a model evaluation job see, Model evaluation.

", "smithy.api#http": { "code": 202, "method": "POST", @@ -1209,6 +1218,94 @@ "smithy.api#output": {} } }, + "com.amazonaws.bedrock#CreateModelCopyJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrock#CreateModelCopyJobRequest" + }, + "output": { + "target": "com.amazonaws.bedrock#CreateModelCopyJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrock#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrock#InternalServerException" + }, + { + "target": "com.amazonaws.bedrock#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrock#TooManyTagsException" + } + ], + "traits": { + "smithy.api#documentation": "

Copies a model to another region so that it can be used there. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/model-copy-jobs" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.bedrock#CreateModelCopyJobRequest": { + "type": "structure", + "members": { + "sourceModelArn": { + "target": "com.amazonaws.bedrock#ModelArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the model to be copied.

", + "smithy.api#required": {} + } + }, + "targetModelName": { + "target": "com.amazonaws.bedrock#CustomModelName", + "traits": { + "smithy.api#documentation": "

A name for the copied model.

", + "smithy.api#required": {} + } + }, + "modelKmsKeyId": { + "target": "com.amazonaws.bedrock#KmsKeyId", + "traits": { + "smithy.api#documentation": "

The ARN of the KMS key that you use to encrypt the model copy.

" + } + }, + "targetModelTags": { + "target": "com.amazonaws.bedrock#TagList", + "traits": { + "smithy.api#documentation": "

Tags to associate with the target model. For more information, see Tag resources in the Amazon Bedrock User Guide.

" + } + }, + "clientRequestToken": { + "target": "com.amazonaws.bedrock#IdempotencyToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n\t Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrock#CreateModelCopyJobResponse": { + "type": "structure", + "members": { + "jobArn": { + "target": "com.amazonaws.bedrock#ModelCopyJobArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the model copy job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.bedrock#CreateModelCustomizationJob": { "type": "operation", "input": { @@ -1244,7 +1341,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a fine-tuning job to customize a base model.

\n

You specify the base foundation model and the location of the training data.\n After the model-customization job completes successfully, your custom model resource will be ready to use. Amazon Bedrock returns validation loss metrics and output generations after the job completes.\n

\n

For information on the format of training and validation data, see Prepare the datasets.

\n

\n Model-customization jobs are asynchronous and the completion time depends on the base model and the training/validation data size.\n To monitor a job, use the GetModelCustomizationJob operation to retrieve the job status.

\n

For more information, see Custom models in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Creates a fine-tuning job to customize a base model.

\n

You specify the base foundation model and the location of the training data.\n After the model-customization job completes successfully, your custom model resource will be ready to use. Amazon Bedrock returns validation loss metrics and output generations after the job completes.\n

\n

For information on the format of training and validation data, see Prepare the datasets.

\n

\n Model-customization jobs are asynchronous and the completion time depends on the base model and the training/validation data size.\n To monitor a job, use the GetModelCustomizationJob operation to retrieve the job status.

\n

For more information, see Custom models in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 201, "method": "POST", @@ -1345,7 +1442,7 @@ "vpcConfig": { "target": "com.amazonaws.bedrock#VpcConfig", "traits": { - "smithy.api#documentation": "

VPC configuration (optional). Configuration parameters for the\n private Virtual Private Cloud (VPC) that contains the resources you are using for this job.

" + "smithy.api#documentation": "

VPC configuration (optional). Configuration parameters for the\n private Virtual Private Cloud (VPC) that contains the resources you are using for this job.

" } } }, @@ -1400,7 +1497,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates dedicated throughput for a base or custom model with the model units and for the duration that you specify. For pricing details, see Amazon Bedrock Pricing. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Creates dedicated throughput for a base or custom model with the model units and for the duration that you specify. For pricing details, see Amazon Bedrock Pricing. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 201, "method": "POST", @@ -1422,7 +1519,7 @@ "modelUnits": { "target": "com.amazonaws.bedrock#PositiveInteger", "traits": { - "smithy.api#documentation": "

Number of model units to allocate. A model unit delivers a specific throughput level for the specified model. The throughput level of a model unit specifies the total number of input and output tokens that it can process and generate within a span of one minute. By default, your account has no model units for purchasing Provisioned Throughputs with commitment. You must first visit the Amazon Web Services support center to request MUs.

\n

For model unit quotas, see Provisioned Throughput quotas in the Amazon Bedrock User Guide.

\n

For more information about what an MU specifies, contact your Amazon Web Services account manager.

", + "smithy.api#documentation": "

Number of model units to allocate. A model unit delivers a specific throughput level for the specified model. The throughput level of a model unit specifies the total number of input and output tokens that it can process and generate within a span of one minute. By default, your account has no model units for purchasing Provisioned Throughputs with commitment. You must first visit the Amazon Web Services support center to request MUs.

\n

For model unit quotas, see Provisioned Throughput quotas in the Amazon Bedrock User Guide.

\n

For more information about what an MU specifies, contact your Amazon Web Services account manager.

", "smithy.api#required": {} } }, @@ -1436,14 +1533,14 @@ "modelId": { "target": "com.amazonaws.bedrock#ModelIdentifier", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) or name of the model to associate with this Provisioned Throughput. For a list of models for which you can purchase Provisioned Throughput, see Amazon Bedrock model IDs for purchasing Provisioned Throughput in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) or name of the model to associate with this Provisioned Throughput. For a list of models for which you can purchase Provisioned Throughput, see Amazon Bedrock model IDs for purchasing Provisioned Throughput in the Amazon Bedrock User Guide.

", "smithy.api#required": {} } }, "commitmentDuration": { "target": "com.amazonaws.bedrock#CommitmentDuration", "traits": { - "smithy.api#documentation": "

The commitment duration requested for the Provisioned Throughput. Billing occurs hourly and is discounted for longer commitment terms. To request a no-commit Provisioned Throughput, omit this field.

\n

Custom models support all levels of commitment. To see which base models support no commitment, see Supported regions and models for Provisioned Throughput in the Amazon Bedrock User Guide

" + "smithy.api#documentation": "

The commitment duration requested for the Provisioned Throughput. Billing occurs hourly and is discounted for longer commitment terms. To request a no-commit Provisioned Throughput, omit this field.

\n

Custom models support all levels of commitment. To see which base models support no commitment, see Supported regions and models for Provisioned Throughput in the Amazon Bedrock User Guide\n

" } }, "tags": { @@ -1489,7 +1586,7 @@ "min": 1, "max": 63 }, - "smithy.api#pattern": "^([0-9a-zA-Z][_-]?)+$" + "smithy.api#pattern": "^([0-9a-zA-Z][_-]?){1,63}$" } }, "com.amazonaws.bedrock#CustomModelSummary": { @@ -1535,6 +1632,12 @@ "traits": { "smithy.api#documentation": "

Specifies whether to carry out continued pre-training of a model or whether to fine-tune it. For more information, see Custom models.

" } + }, + "ownerAccountId": { + "target": "com.amazonaws.bedrock#AccountId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the account that owns the model.

" + } } }, "traits": { @@ -1593,7 +1696,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a custom model that you created earlier. For more information, see Custom models in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Deletes a custom model that you created earlier. For more information, see Custom models in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "DELETE", @@ -1765,7 +1868,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a Provisioned Throughput. You can't delete a Provisioned Throughput before the commitment term is over. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Deletes a Provisioned Throughput. You can't delete a Provisioned Throughput before the commitment term is over. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "DELETE", @@ -2553,7 +2656,7 @@ } ], "traits": { - "smithy.api#documentation": "

Get the properties associated with a Amazon Bedrock custom model that you have created.For more information, see Custom models in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Get the properties associated with a Amazon Bedrock custom model that you have created.For more information, see Custom models in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -2703,7 +2806,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the properties associated with a model evaluation job, including the\n status of the job. For more information, see Model evaluations.

", + "smithy.api#documentation": "

Retrieves the properties associated with a model evaluation job, including the\n status of the job. For more information, see Model evaluation.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -3071,6 +3174,137 @@ "smithy.api#output": {} } }, + "com.amazonaws.bedrock#GetModelCopyJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrock#GetModelCopyJobRequest" + }, + "output": { + "target": "com.amazonaws.bedrock#GetModelCopyJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrock#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrock#InternalServerException" + }, + { + "target": "com.amazonaws.bedrock#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrock#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrock#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves information about a model copy job. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/model-copy-jobs/{jobArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.bedrock#GetModelCopyJobRequest": { + "type": "structure", + "members": { + "jobArn": { + "target": "com.amazonaws.bedrock#ModelCopyJobArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the model copy job.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrock#GetModelCopyJobResponse": { + "type": "structure", + "members": { + "jobArn": { + "target": "com.amazonaws.bedrock#ModelCopyJobArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the model copy job.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrock#ModelCopyJobStatus", + "traits": { + "smithy.api#documentation": "

The status of the model copy job.

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "com.amazonaws.bedrock#Timestamp", + "traits": { + "smithy.api#documentation": "

The time at which the model copy job was created.

", + "smithy.api#required": {} + } + }, + "targetModelArn": { + "target": "com.amazonaws.bedrock#CustomModelArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the copied model.

", + "smithy.api#required": {} + } + }, + "targetModelName": { + "target": "com.amazonaws.bedrock#CustomModelName", + "traits": { + "smithy.api#documentation": "

The name of the copied model.

" + } + }, + "sourceAccountId": { + "target": "com.amazonaws.bedrock#AccountId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the account that the model being copied originated from.

", + "smithy.api#required": {} + } + }, + "sourceModelArn": { + "target": "com.amazonaws.bedrock#ModelArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the original model being copied.

", + "smithy.api#required": {} + } + }, + "targetModelKmsKeyArn": { + "target": "com.amazonaws.bedrock#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key encrypting the copied model.

" + } + }, + "targetModelTags": { + "target": "com.amazonaws.bedrock#TagList", + "traits": { + "smithy.api#documentation": "

The tags associated with the copied model.

" + } + }, + "failureMessage": { + "target": "com.amazonaws.bedrock#ErrorMessage", + "traits": { + "smithy.api#documentation": "

An error message for why the model copy job failed.

" + } + }, + "sourceModelName": { + "target": "com.amazonaws.bedrock#CustomModelName", + "traits": { + "smithy.api#documentation": "

The name of the original model being copied.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.bedrock#GetModelCustomizationJob": { "type": "operation", "input": { @@ -3097,7 +3331,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the properties associated with a model-customization job, including the status of the job.\n For more information, see Custom models in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Retrieves the properties associated with a model-customization job, including the status of the job.\n For more information, see Custom models in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -3168,7 +3402,7 @@ "status": { "target": "com.amazonaws.bedrock#ModelCustomizationJobStatus", "traits": { - "smithy.api#documentation": "

The status of the job. A successful job transitions from in-progress to completed when the output model is ready to use.\n If the job failed, the failure message contains information about why the job failed.

" + "smithy.api#documentation": "

The status of the job. A successful job transitions from in-progress to completed when the output model is ready to use.\n If the job failed, the failure message contains information about why the job failed.

" } }, "failureMessage": { @@ -3342,7 +3576,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns details for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Returns details for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -3941,7 +4175,7 @@ "type": { "target": "com.amazonaws.bedrock#GuardrailPiiEntityType", "traits": { - "smithy.api#documentation": "

The type of PII entity. For example, Social Security Number.

", + "smithy.api#documentation": "

The type of PII entity. For example, Social Security Number.

", "smithy.api#required": {} } }, @@ -4845,7 +5079,7 @@ } }, "traits": { - "smithy.api#documentation": "

In a model evaluation job that uses human workers you must\n define the name of the metric, and how you want that metric rated\n ratingMethod, and an optional description of the metric.

" + "smithy.api#documentation": "

In a model evaluation job that uses human workers you must \n define the name of the metric, and how you want that metric rated\n ratingMethod, and an optional description of the metric.

" } }, "com.amazonaws.bedrock#HumanEvaluationCustomMetrics": { @@ -4999,7 +5233,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of the custom models that you have created with the CreateModelCustomizationJob operation.

\n

For more information, see Custom models in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Returns a list of the custom models that you have created with the CreateModelCustomizationJob operation.

\n

For more information, see Custom models in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -5055,14 +5289,14 @@ "maxResults": { "target": "com.amazonaws.bedrock#MaxResults", "traits": { - "smithy.api#documentation": "

Maximum number of results to return in the response.

", + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", "smithy.api#httpQuery": "maxResults" } }, "nextToken": { "target": "com.amazonaws.bedrock#PaginationToken", "traits": { - "smithy.api#documentation": "

Continuation token from the previous response, for Amazon Bedrock to list the next set of results.

", + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", "smithy.api#httpQuery": "nextToken" } }, @@ -5079,6 +5313,13 @@ "smithy.api#documentation": "

The sort order of the results.

", "smithy.api#httpQuery": "sortOrder" } + }, + "isOwned": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Return custom models depending on if the current account owns them (true) or if they were shared with the current account (false).

", + "smithy.api#httpQuery": "isOwned" + } } }, "traits": { @@ -5091,7 +5332,7 @@ "nextToken": { "target": "com.amazonaws.bedrock#PaginationToken", "traits": { - "smithy.api#documentation": "

Continuation token for the next request to list the next set of results.

" + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" } }, "modelSummaries": { @@ -5250,7 +5491,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists Amazon Bedrock foundation models that you can use. You can filter the results with the request parameters. For more information, see Foundation models in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Lists Amazon Bedrock foundation models that you can use. You can filter the results with the request parameters. For more information, see Foundation models in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -5272,7 +5513,7 @@ "byCustomizationType": { "target": "com.amazonaws.bedrock#ModelCustomization", "traits": { - "smithy.api#documentation": "

Return models that support the customization type that you specify. For more information, see Custom models in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Return models that support the customization type that you specify. For more information, see Custom models in the Amazon Bedrock User Guide.

", "smithy.api#httpQuery": "byCustomizationType" } }, @@ -5286,7 +5527,7 @@ "byInferenceType": { "target": "com.amazonaws.bedrock#InferenceType", "traits": { - "smithy.api#documentation": "

Return models that support the inference type that you specify. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Return models that support the inference type that you specify. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", "smithy.api#httpQuery": "byInferenceType" } } @@ -5400,6 +5641,145 @@ "smithy.api#output": {} } }, + "com.amazonaws.bedrock#ListModelCopyJobs": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrock#ListModelCopyJobsRequest" + }, + "output": { + "target": "com.amazonaws.bedrock#ListModelCopyJobsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrock#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrock#InternalServerException" + }, + { + "target": "com.amazonaws.bedrock#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrock#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrock#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of model copy jobs that you have submitted. You can filter the jobs to return based on\n one or more criteria. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/model-copy-jobs" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "modelCopyJobSummaries" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.bedrock#ListModelCopyJobsRequest": { + "type": "structure", + "members": { + "creationTimeAfter": { + "target": "com.amazonaws.bedrock#Timestamp", + "traits": { + "smithy.api#documentation": "

Filters for model copy jobs created after the specified time.

", + "smithy.api#httpQuery": "creationTimeAfter" + } + }, + "creationTimeBefore": { + "target": "com.amazonaws.bedrock#Timestamp", + "traits": { + "smithy.api#documentation": "

Filters for model copy jobs created before the specified time.

", + "smithy.api#httpQuery": "creationTimeBefore" + } + }, + "statusEquals": { + "target": "com.amazonaws.bedrock#ModelCopyJobStatus", + "traits": { + "smithy.api#documentation": "

Filters for model copy jobs whose status matches the value that you specify.

", + "smithy.api#httpQuery": "statusEquals" + } + }, + "sourceAccountEquals": { + "target": "com.amazonaws.bedrock#AccountId", + "traits": { + "smithy.api#documentation": "

Filters for model copy jobs in which the account that the source model belongs to is equal to the value that you specify.

", + "smithy.api#httpQuery": "sourceAccountEquals" + } + }, + "sourceModelArnEquals": { + "target": "com.amazonaws.bedrock#ModelArn", + "traits": { + "smithy.api#documentation": "

Filters for model copy jobs in which the Amazon Resource Name (ARN) of the source model is equal to the value that you specify.

", + "smithy.api#httpQuery": "sourceModelArnEquals" + } + }, + "targetModelNameContains": { + "target": "com.amazonaws.bedrock#CustomModelName", + "traits": { + "smithy.api#documentation": "

Filters for model copy jobs in which the name of the copied model contains the string that you specify.

", + "smithy.api#httpQuery": "outputModelNameContains" + } + }, + "maxResults": { + "target": "com.amazonaws.bedrock#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrock#PaginationToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "sortBy": { + "target": "com.amazonaws.bedrock#SortJobsBy", + "traits": { + "smithy.api#documentation": "

The field to sort by in the returned list of model copy jobs.

", + "smithy.api#httpQuery": "sortBy" + } + }, + "sortOrder": { + "target": "com.amazonaws.bedrock#SortOrder", + "traits": { + "smithy.api#documentation": "

Specifies whether to sort the results in ascending or descending order.

", + "smithy.api#httpQuery": "sortOrder" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrock#ListModelCopyJobsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.bedrock#PaginationToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + }, + "modelCopyJobSummaries": { + "target": "com.amazonaws.bedrock#ModelCopyJobSummaries", + "traits": { + "smithy.api#documentation": "

A list of information about each model copy job.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.bedrock#ListModelCustomizationJobs": { "type": "operation", "input": { @@ -5423,7 +5803,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of model customization jobs that you have submitted. You can filter the jobs to return based on\n one or more criteria.

\n

For more information, see Custom models in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Returns a list of model customization jobs that you have submitted. You can filter the jobs to return based on\n one or more criteria.

\n

For more information, see Custom models in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -5472,14 +5852,14 @@ "maxResults": { "target": "com.amazonaws.bedrock#MaxResults", "traits": { - "smithy.api#documentation": "

Maximum number of results to return in the response.

", + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", "smithy.api#httpQuery": "maxResults" } }, "nextToken": { "target": "com.amazonaws.bedrock#PaginationToken", "traits": { - "smithy.api#documentation": "

Continuation token from the previous response, for Amazon Bedrock to list the next set of results.

", + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", "smithy.api#httpQuery": "nextToken" } }, @@ -5508,7 +5888,7 @@ "nextToken": { "target": "com.amazonaws.bedrock#PaginationToken", "traits": { - "smithy.api#documentation": "

Page continuation token to use in the next request.

" + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" } }, "modelCustomizationJobSummaries": { @@ -5545,7 +5925,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the Provisioned Throughputs in the account. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Lists the Provisioned Throughputs in the account. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -5677,7 +6057,7 @@ } ], "traits": { - "smithy.api#documentation": "

List the tags associated with the specified resource.

\n

For more information, see Tagging resources in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

List the tags associated with the specified resource.

\n

For more information, see Tagging resources in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -5797,6 +6177,138 @@ "smithy.api#pattern": "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}))$" } }, + "com.amazonaws.bedrock#ModelCopyJobArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 1011 + }, + "smithy.api#pattern": "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:model-copy-job/[a-z0-9]{12}$" + } + }, + "com.amazonaws.bedrock#ModelCopyJobStatus": { + "type": "enum", + "members": { + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InProgress" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Completed" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + } + } + }, + "com.amazonaws.bedrock#ModelCopyJobSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrock#ModelCopyJobSummary" + } + }, + "com.amazonaws.bedrock#ModelCopyJobSummary": { + "type": "structure", + "members": { + "jobArn": { + "target": "com.amazonaws.bedrock#ModelCopyJobArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the model copy job.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrock#ModelCopyJobStatus", + "traits": { + "smithy.api#documentation": "

The status of the model copy job.

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "com.amazonaws.bedrock#Timestamp", + "traits": { + "smithy.api#documentation": "

The time that the model copy job was created.

", + "smithy.api#required": {} + } + }, + "targetModelArn": { + "target": "com.amazonaws.bedrock#CustomModelArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the copied model.

", + "smithy.api#required": {} + } + }, + "targetModelName": { + "target": "com.amazonaws.bedrock#CustomModelName", + "traits": { + "smithy.api#documentation": "

The name of the copied model.

" + } + }, + "sourceAccountId": { + "target": "com.amazonaws.bedrock#AccountId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the account that the model being copied originated from.

", + "smithy.api#required": {} + } + }, + "sourceModelArn": { + "target": "com.amazonaws.bedrock#ModelArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the original model being copied.

", + "smithy.api#required": {} + } + }, + "targetModelKmsKeyArn": { + "target": "com.amazonaws.bedrock#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key used to encrypt the copied model.

" + } + }, + "targetModelTags": { + "target": "com.amazonaws.bedrock#TagList", + "traits": { + "smithy.api#documentation": "

Tags associated with the copied model.

" + } + }, + "failureMessage": { + "target": "com.amazonaws.bedrock#ErrorMessage", + "traits": { + "smithy.api#documentation": "

If a model fails to be copied, a message describing why the job failed is included here.

" + } + }, + "sourceModelName": { + "target": "com.amazonaws.bedrock#CustomModelName", + "traits": { + "smithy.api#documentation": "

The name of the original model being copied.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about each model copy job.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrock#ModelCopyResource": { + "type": "resource", + "operations": [ + { + "target": "com.amazonaws.bedrock#CreateModelCopyJob" + }, + { + "target": "com.amazonaws.bedrock#GetModelCopyJob" + }, + { + "target": "com.amazonaws.bedrock#ListModelCopyJobs" + } + ] + }, "com.amazonaws.bedrock#ModelCustomization": { "type": "enum", "members": { @@ -6531,7 +7043,7 @@ } ], "traits": { - "smithy.api#documentation": "

Stops an active model customization job. For more information, see Custom models in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Stops an active model customization job. For more information, see Custom models in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -6672,7 +7184,7 @@ } ], "traits": { - "smithy.api#documentation": "

Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -6776,7 +7288,7 @@ } }, "traits": { - "smithy.api#documentation": "

The request contains more tags than can be associated with a resource (50 tags per resource).\n The maximum number of tags includes both existing tags and those included in your current request.

", + "smithy.api#documentation": "

The request contains more tags than can be associated with a resource (50 tags per resource). \n The maximum number of tags includes both existing tags and those included in your current request.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -6853,7 +7365,7 @@ } ], "traits": { - "smithy.api#documentation": "

Remove one or more tags from a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Remove one or more tags from a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -7072,7 +7584,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the name or associated model for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Updates the name or associated model for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "PATCH", diff --git a/models/cleanrooms.json b/models/cleanrooms.json index db7e7cfa50..ec1149fb1a 100644 --- a/models/cleanrooms.json +++ b/models/cleanrooms.json @@ -31,6 +31,12 @@ { "target": "com.amazonaws.cleanrooms#ConfiguredTableResource" }, + { + "target": "com.amazonaws.cleanrooms#IdMappingTableResource" + }, + { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationResource" + }, { "target": "com.amazonaws.cleanrooms#MembershipResource" }, @@ -783,6 +789,38 @@ "smithy.api#pattern": "^\\d+$" } }, + "com.amazonaws.cleanrooms#AdditionalAnalyses": { + "type": "enum", + "members": { + "ALLOWED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALLOWED" + } + }, + "REQUIRED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REQUIRED" + } + }, + "NOT_ALLOWED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_ALLOWED" + } + } + } + }, + "com.amazonaws.cleanrooms#AdditionalAnalysesResourceArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:([\\d]{12}|\\*):membership/[\\*\\d\\w-]+/configuredaudiencemodelassociation/[\\*\\d\\w-]+$" + } + }, "com.amazonaws.cleanrooms#AggregateColumn": { "type": "structure", "members": { @@ -896,6 +934,18 @@ ] } }, + "com.amazonaws.cleanrooms#AllowedAdditionalAnalyses": { + "type": "list", + "member": { + "target": "com.amazonaws.cleanrooms#AdditionalAnalysesResourceArn" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 25 + } + } + }, "com.amazonaws.cleanrooms#AllowedAnalysesList": { "type": "list", "member": { @@ -920,6 +970,12 @@ } } }, + "com.amazonaws.cleanrooms#AllowedResultReceivers": { + "type": "list", + "member": { + "target": "com.amazonaws.cleanrooms#AccountId" + } + }, "com.amazonaws.cleanrooms#AnalysisFormat": { "type": "string", "traits": { @@ -1085,6 +1141,12 @@ 
"smithy.api#documentation": "

Columns that must meet a specific threshold value (after an aggregation function is\n applied to it) for each output row to be returned.

", "smithy.api#required": {} } + }, + "additionalAnalyses": { + "target": "com.amazonaws.cleanrooms#AdditionalAnalyses", + "traits": { + "smithy.api#documentation": "

An indicator as to whether additional analyses (such as Clean Rooms ML) can be applied to the output of the direct query.

\n

The\n additionalAnalyses\n parameter is\n currently\n supported for the list analysis rule (AnalysisRuleList) and the custom\n analysis rule (AnalysisRuleCustom).

" + } } }, "traits": { @@ -1135,6 +1197,18 @@ } } }, + "additionalAnalyses": { + "target": "com.amazonaws.cleanrooms#AdditionalAnalyses", + "traits": { + "smithy.api#documentation": "

An indicator as to whether additional analyses (such as Clean Rooms ML) can be applied to the output of the direct query.

" + } + }, + "disallowedOutputColumns": { + "target": "com.amazonaws.cleanrooms#AnalysisRuleColumnList", + "traits": { + "smithy.api#documentation": "

A list of columns that aren't allowed to be shown in the query output.

" + } + }, "differentialPrivacy": { "target": "com.amazonaws.cleanrooms#DifferentialPrivacyConfiguration", "traits": { @@ -1146,6 +1220,38 @@ "smithy.api#documentation": "

A type of analysis rule that enables the table owner to approve custom SQL queries on\n their configured tables. It supports differential privacy.

" } }, + "com.amazonaws.cleanrooms#AnalysisRuleIdMappingTable": { + "type": "structure", + "members": { + "joinColumns": { + "target": "com.amazonaws.cleanrooms#AnalysisRuleColumnList", + "traits": { + "smithy.api#documentation": "

The columns that query runners are allowed to use in an INNER JOIN statement.

", + "smithy.api#length": { + "min": 2, + "max": 2 + }, + "smithy.api#required": {} + } + }, + "queryConstraints": { + "target": "com.amazonaws.cleanrooms#QueryConstraintList", + "traits": { + "smithy.api#documentation": "

The query constraints of the analysis rule ID mapping table.

", + "smithy.api#required": {} + } + }, + "dimensionColumns": { + "target": "com.amazonaws.cleanrooms#AnalysisRuleColumnList", + "traits": { + "smithy.api#documentation": "

The columns that query runners are allowed to select, group by, or filter by.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines details for the analysis rule ID mapping table.

" + } + }, "com.amazonaws.cleanrooms#AnalysisRuleList": { "type": "structure", "members": { @@ -1171,6 +1277,12 @@ "smithy.api#documentation": "

Columns that can be listed in the output.

", "smithy.api#required": {} } + }, + "additionalAnalyses": { + "target": "com.amazonaws.cleanrooms#AdditionalAnalyses", + "traits": { + "smithy.api#documentation": "

An indicator as to whether additional analyses (such as Clean Rooms ML) can be applied to the output of the direct query.

" + } } }, "traits": { @@ -1211,6 +1323,12 @@ "traits": { "smithy.api#documentation": "

Analysis rule type that enables custom SQL queries on a configured table.

" } + }, + "idMappingTable": { + "target": "com.amazonaws.cleanrooms#AnalysisRuleIdMappingTable", + "traits": { + "smithy.api#documentation": "

The ID mapping table.

" + } } }, "traits": { @@ -1237,6 +1355,12 @@ "traits": { "smithy.api#enumValue": "CUSTOM" } + }, + "ID_MAPPING_TABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ID_MAPPING_TABLE" + } } } }, @@ -1643,6 +1767,23 @@ } } }, + "com.amazonaws.cleanrooms#AnalysisType": { + "type": "enum", + "members": { + "DIRECT_ANALYSIS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DIRECT_ANALYSIS" + } + }, + "ADDITIONAL_ANALYSIS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ADDITIONAL_ANALYSIS" + } + } + } + }, "com.amazonaws.cleanrooms#BatchGetCollaborationAnalysisTemplate": { "type": "operation", "input": { @@ -1979,7 +2120,7 @@ "names": { "target": "com.amazonaws.cleanrooms#TableAliasList", "traits": { - "smithy.api#documentation": "

The names for the schema objects to\n retrieve.

", + "smithy.api#documentation": "

The names for the schema objects to retrieve.

", "smithy.api#required": {} } } @@ -2473,6 +2614,184 @@ "smithy.api#pattern": "^(?!\\s*$)[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t\\r\\n]*$" } }, + "com.amazonaws.cleanrooms#CollaborationIdNamespaceAssociation": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the collaboration ID namespace association.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdNamespaceAssociationId" + } + }, + "arn": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "collaborationId": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the collaboration that contains the collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "collaborationArn": { + "target": "com.amazonaws.cleanrooms#CollaborationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the collaboration that contains the collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.cleanrooms#GenericResourceName", + "traits": { + "smithy.api#documentation": "

The name of the collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.cleanrooms#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the collaboration ID namespace association.

" + } + }, + "creatorAccountId": { + "target": "com.amazonaws.cleanrooms#AccountId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Web Services account that created the collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "createTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time at which the collaboration ID namespace association was created.

", + "smithy.api#required": {} + } + }, + "updateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The most recent time at which the collaboration ID namespace association was updated.

", + "smithy.api#required": {} + } + }, + "inputReferenceConfig": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferenceConfig", + "traits": { + "smithy.api#documentation": "

The input reference configuration that's necessary to create the collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "inputReferenceProperties": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferenceProperties", + "traits": { + "smithy.api#documentation": "

The input reference properties that are needed to create the collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "idMappingConfig": { + "target": "com.amazonaws.cleanrooms#IdMappingConfig" + } + }, + "traits": { + "smithy.api#documentation": "

Defines details for the collaboration ID namespace association.

" + } + }, + "com.amazonaws.cleanrooms#CollaborationIdNamespaceAssociationSummary": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "createTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time at which the collaboration ID namespace association was created.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "updateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The most recent time at which the collaboration ID namespace association was updated.

", + "smithy.api#required": {} + } + }, + "collaborationArn": { + "target": "com.amazonaws.cleanrooms#CollaborationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the collaboration that contains this collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "collaborationId": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the collaboration that contains this collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "creatorAccountId": { + "target": "com.amazonaws.cleanrooms#AccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account that created this collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "inputReferenceConfig": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferenceConfig", + "traits": { + "smithy.api#documentation": "

The input reference configuration that's used to create the collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.cleanrooms#GenericResourceName", + "traits": { + "smithy.api#documentation": "

The name of the collaboration ID namespace association.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.cleanrooms#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the collaboration ID namespace association.

" + } + }, + "inputReferenceProperties": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferencePropertiesSummary", + "traits": { + "smithy.api#documentation": "

The input reference properties that are used to create the collaboration ID namespace association.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides summary information about the collaboration ID namespace association.

" + } + }, + "com.amazonaws.cleanrooms#CollaborationIdNamespaceAssociationSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.cleanrooms#CollaborationIdNamespaceAssociationSummary" + } + }, "com.amazonaws.cleanrooms#CollaborationIdentifier": { "type": "string", "traits": { @@ -2781,6 +3100,9 @@ { "target": "com.amazonaws.cleanrooms#GetCollaborationConfiguredAudienceModelAssociation" }, + { + "target": "com.amazonaws.cleanrooms#GetCollaborationIdNamespaceAssociation" + }, { "target": "com.amazonaws.cleanrooms#GetCollaborationPrivacyBudgetTemplate" }, @@ -2796,6 +3118,9 @@ { "target": "com.amazonaws.cleanrooms#ListCollaborationConfiguredAudienceModelAssociations" }, + { + "target": "com.amazonaws.cleanrooms#ListCollaborationIdNamespaceAssociations" + }, { "target": "com.amazonaws.cleanrooms#ListCollaborationPrivacyBudgets" }, @@ -2949,6 +3274,20 @@ "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t]*$" } }, + "com.amazonaws.cleanrooms#ConfigurationDetails": { + "type": "union", + "members": { + "directAnalysisConfigurationDetails": { + "target": "com.amazonaws.cleanrooms#DirectAnalysisConfigurationDetails", + "traits": { + "smithy.api#documentation": "

The direct analysis configuration details.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration details.

" + } + }, "com.amazonaws.cleanrooms#ConfiguredAudienceModelArn": { "type": "string", "traits": { @@ -3480,6 +3819,12 @@ "smithy.api#documentation": "

A description of the configured table association.

" } }, + "analysisRuleTypes": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleTypeList", + "traits": { + "smithy.api#documentation": "

The analysis rule types for the configured table association.

" + } + }, "createTime": { "target": "smithy.api#Timestamp", "traits": { @@ -3499,14 +3844,202 @@ "smithy.api#documentation": "

A configured table association links a configured table to a collaboration.

" } }, - "com.amazonaws.cleanrooms#ConfiguredTableAssociationArn": { - "type": "string", - "traits": { - "smithy.api#length": { - "max": 100 - }, - "smithy.api#pattern": "^arn:aws:[\\w]+:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:configuredtableassociation/[\\d\\w-]+/[\\d\\w-]+$" - } + "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRule": { + "type": "structure", + "members": { + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The membership identifier for the configured table association analysis rule.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, + "configuredTableAssociationId": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

\n The\n unique identifier for the configured table association.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "ConfiguredTableAssociationId" + } + }, + "configuredTableAssociationArn": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationArn", + "traits": { + "smithy.api#documentation": "

\n The\n Amazon Resource Name (ARN) of the configured table association.

", + "smithy.api#required": {} + } + }, + "policy": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRulePolicy", + "traits": { + "smithy.api#documentation": "

The policy of the configured table association analysis rule.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleType", + "traits": { + "smithy.api#documentation": "

The type of the configured table association analysis rule.

", + "smithy.api#required": {} + } + }, + "createTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The creation time of the configured table association analysis rule.

", + "smithy.api#required": {} + } + }, + "updateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The update time of the configured table association analysis rule.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An\n analysis rule for a configured table association. This analysis rule specifies how data\n from the table can be used within its associated\n collaboration.\n In the console, the ConfiguredTableAssociationAnalysisRule is referred to as the\n collaboration analysis rule.

" + } + }, + "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleAggregation": { + "type": "structure", + "members": { + "allowedResultReceivers": { + "target": "com.amazonaws.cleanrooms#AllowedResultReceivers", + "traits": { + "smithy.api#documentation": "

The list of collaboration members who are allowed to receive results of queries run\n with this configured table.

" + } + }, + "allowedAdditionalAnalyses": { + "target": "com.amazonaws.cleanrooms#AllowedAdditionalAnalyses", + "traits": { + "smithy.api#documentation": "

The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.

\n

The\n allowedAdditionalAnalyses\n parameter\n is currently supported for the list analysis rule\n (AnalysisRuleList) and the custom analysis rule\n (AnalysisRuleCustom).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configured table association analysis rule applied to a configured table with the aggregation analysis rule.

" + } + }, + "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleCustom": { + "type": "structure", + "members": { + "allowedResultReceivers": { + "target": "com.amazonaws.cleanrooms#AllowedResultReceivers", + "traits": { + "smithy.api#documentation": "

The list of\n collaboration members who are allowed\n to\n receive results of queries run with this configured table.

" + } + }, + "allowedAdditionalAnalyses": { + "target": "com.amazonaws.cleanrooms#AllowedAdditionalAnalyses", + "traits": { + "smithy.api#documentation": "

The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configured table association analysis rule applied to a configured table with the custom analysis rule.

" + } + }, + "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleList": { + "type": "structure", + "members": { + "allowedResultReceivers": { + "target": "com.amazonaws.cleanrooms#AllowedResultReceivers", + "traits": { + "smithy.api#documentation": "

The list of collaboration members who are allowed to receive results of queries run\n with this configured table.

" + } + }, + "allowedAdditionalAnalyses": { + "target": "com.amazonaws.cleanrooms#AllowedAdditionalAnalyses", + "traits": { + "smithy.api#documentation": "

The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configured table association analysis rule applied to a configured table with the list analysis rule.

" + } + }, + "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRulePolicy": { + "type": "union", + "members": { + "v1": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRulePolicyV1", + "traits": { + "smithy.api#documentation": "

The policy for the configured table association analysis rule.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Controls on the query specifications that can be run on an associated configured table.

" + } + }, + "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRulePolicyV1": { + "type": "union", + "members": { + "list": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleList", + "traits": { + "smithy.api#documentation": "

Analysis rule type that enables only list queries on a configured table.

" + } + }, + "aggregation": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleAggregation", + "traits": { + "smithy.api#documentation": "

Analysis rule type that enables only aggregation queries on a configured table.

" + } + }, + "custom": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleCustom", + "traits": { + "smithy.api#documentation": "

Analysis rule type that enables the table owner to approve custom SQL queries on their configured tables. It supports differential privacy.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Controls on the query specifications that can be run on an associated configured table.

" + } + }, + "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleType": { + "type": "enum", + "members": { + "AGGREGATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AGGREGATION" + } + }, + "LIST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LIST" + } + }, + "CUSTOM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOM" + } + } + } + }, + "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleType" + } + }, + "com.amazonaws.cleanrooms#ConfiguredTableAssociationArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 100 + }, + "smithy.api#pattern": "^arn:aws:[\\w]+:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:configuredtableassociation/[\\d\\w-]+/[\\d\\w-]+$" + } }, "com.amazonaws.cleanrooms#ConfiguredTableAssociationIdentifier": { "type": "string", @@ -3543,6 +4076,20 @@ "list": { "target": "com.amazonaws.cleanrooms#ListConfiguredTableAssociations" }, + "operations": [ + { + "target": "com.amazonaws.cleanrooms#CreateConfiguredTableAssociationAnalysisRule" + }, + { + "target": "com.amazonaws.cleanrooms#DeleteConfiguredTableAssociationAnalysisRule" + }, + { + "target": "com.amazonaws.cleanrooms#GetConfiguredTableAssociationAnalysisRule" + }, + { + "target": "com.amazonaws.cleanrooms#UpdateConfiguredTableAssociationAnalysisRule" + } + ], "traits": { "aws.api#arn": { "template": "membership/{MembershipId}/configuredtableassociation/{ConfiguredTableAssociationId}" @@ -4001,7 +4548,7 @@ "creatorPaymentConfiguration": { "target": "com.amazonaws.cleanrooms#PaymentConfiguration", "traits": { - "smithy.api#documentation": "

The collaboration\n creator's payment responsibilities set by the collaboration creator.

\n

If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer.

" + "smithy.api#documentation": "

The collaboration creator's payment responsibilities set by the collaboration creator.

\n

If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer.

" } } } @@ -4012,7 +4559,7 @@ "collaboration": { "target": "com.amazonaws.cleanrooms#Collaboration", "traits": { - "smithy.api#documentation": "

The entire created collaboration object.

", + "smithy.api#documentation": "

The collaboration.

", "smithy.api#required": {} } } @@ -4227,7 +4774,7 @@ "analysisRulePolicy": { "target": "com.amazonaws.cleanrooms#ConfiguredTableAnalysisRulePolicy", "traits": { - "smithy.api#documentation": "

The entire created configured table analysis rule object.

", + "smithy.api#documentation": "

The analysis rule policy that was created for the configured table.

", "smithy.api#required": {} } } @@ -4239,7 +4786,7 @@ "analysisRule": { "target": "com.amazonaws.cleanrooms#ConfiguredTableAnalysisRule", "traits": { - "smithy.api#documentation": "

The entire created analysis rule.

", + "smithy.api#documentation": "

The analysis rule that was created for the configured table.

", "smithy.api#required": {} } } @@ -4289,6 +4836,102 @@ } } }, + "com.amazonaws.cleanrooms#CreateConfiguredTableAssociationAnalysisRule": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#CreateConfiguredTableAssociationAnalysisRuleInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#CreateConfiguredTableAssociationAnalysisRuleOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#ConflictException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants permission to create analysis rules for configured table association" + }, + "smithy.api#documentation": "

Creates a new analysis rule for an associated configured table.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.cleanrooms#CreateConfiguredTableAssociationAnalysisRuleInput": { + "type": "structure", + "members": { + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, + "configuredTableAssociationIdentifier": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique ID for the configured table association. Currently accepts the\n configured table association ID.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "ConfiguredTableAssociationId" + } + }, + "analysisRuleType": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleType", + "traits": { + "smithy.api#documentation": "

The type of analysis rule.

", + "smithy.api#required": {} + } + }, + "analysisRulePolicy": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRulePolicy", + "traits": { + "smithy.api#documentation": "

The analysis rule policy that was created for the configured table\n association.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#CreateConfiguredTableAssociationAnalysisRuleOutput": { + "type": "structure", + "members": { + "analysisRule": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRule", + "traits": { + "smithy.api#documentation": "

The analysis rule for the configured table association.\n In the console, the\n ConfiguredTableAssociationAnalysisRule is referred to as the\n collaboration analysis rule.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.cleanrooms#CreateConfiguredTableAssociationInput": { "type": "structure", "members": { @@ -4342,7 +4985,7 @@ "configuredTableAssociation": { "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociation", "traits": { - "smithy.api#documentation": "

The entire configured table association object.

", + "smithy.api#documentation": "

The configured table association.

", "smithy.api#required": {} } } @@ -4405,13 +5048,13 @@ } } }, - "com.amazonaws.cleanrooms#CreateMembership": { + "com.amazonaws.cleanrooms#CreateIdMappingTable": { "type": "operation", "input": { - "target": "com.amazonaws.cleanrooms#CreateMembershipInput" + "target": "com.amazonaws.cleanrooms#CreateIdMappingTableInput" }, "output": { - "target": "com.amazonaws.cleanrooms#CreateMembershipOutput" + "target": "com.amazonaws.cleanrooms#CreateIdMappingTableOutput" }, "errors": [ { @@ -4437,33 +5080,243 @@ } ], "traits": { - "aws.iam#actionPermissionDescription": "Grants permission to join collaborations by creating a membership", - "smithy.api#documentation": "

Creates a membership for a specific collaboration identifier and joins the\n collaboration.

", + "aws.iam#actionPermissionDescription": "Grants permission to link an id mapping workflow with a collaboration by creating a new id mapping table", + "smithy.api#documentation": "

Creates an ID mapping table.

", "smithy.api#http": { "code": 200, "method": "POST", - "uri": "/memberships" + "uri": "/memberships/{membershipIdentifier}/idmappingtables" } } }, - "com.amazonaws.cleanrooms#CreateMembershipInput": { + "com.amazonaws.cleanrooms#CreateIdMappingTableInput": { "type": "structure", "members": { - "collaborationIdentifier": { - "target": "com.amazonaws.cleanrooms#CollaborationIdentifier", + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", "traits": { - "smithy.api#documentation": "

The unique ID for the associated collaboration.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The unique identifier of the membership that contains the ID mapping table.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" } }, - "queryLogStatus": { - "target": "com.amazonaws.cleanrooms#MembershipQueryLogStatus", + "name": { + "target": "com.amazonaws.cleanrooms#ResourceAlias", "traits": { - "smithy.api#documentation": "

An indicator as to whether query logging has been enabled or disabled for the\n membership.

", + "smithy.api#documentation": "

A name for the ID mapping table.

", "smithy.api#required": {} } }, - "tags": { + "description": { + "target": "com.amazonaws.cleanrooms#ResourceDescription", + "traits": { + "smithy.api#documentation": "

A description of the ID mapping table.

" + } + }, + "inputReferenceConfig": { + "target": "com.amazonaws.cleanrooms#IdMappingTableInputReferenceConfig", + "traits": { + "smithy.api#documentation": "

The input reference configuration needed to create the ID mapping table.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.cleanrooms#TagMap", + "traits": { + "smithy.api#documentation": "

An optional label that you can assign to a resource when you create it. Each tag\n consists of a key and an optional value, both of which you define. When you use tagging,\n you can also use tag-based access control in IAM policies to control access\n to this resource.

" + } + }, + "kmsKeyArn": { + "target": "com.amazonaws.cleanrooms#KMSKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. This value is used to encrypt the mapping table data that is stored by Clean Rooms.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#CreateIdMappingTableOutput": { + "type": "structure", + "members": { + "idMappingTable": { + "target": "com.amazonaws.cleanrooms#IdMappingTable", + "traits": { + "smithy.api#documentation": "

The ID mapping table that was created.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.cleanrooms#CreateIdNamespaceAssociation": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#CreateIdNamespaceAssociationInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#CreateIdNamespaceAssociationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#ConflictException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to link an AWS Entity Resolution Id Namespace with a collaboration by creating a new association", + "smithy.api#documentation": "

Creates an ID namespace association.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/memberships/{membershipIdentifier}/idnamespaceassociations" + } + } + }, + "com.amazonaws.cleanrooms#CreateIdNamespaceAssociationInput": { + "type": "structure", + "members": { + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership that contains the ID namespace association.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, + "inputReferenceConfig": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferenceConfig", + "traits": { + "smithy.api#documentation": "

The input reference configuration needed to create the ID namespace association.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.cleanrooms#TagMap", + "traits": { + "smithy.api#documentation": "

An optional label that you can assign to a resource when you create it. Each tag\n consists of a key and an optional value, both of which you define. When you use tagging,\n you can also use tag-based access control in IAM policies to control access\n to this resource.

" + } + }, + "name": { + "target": "com.amazonaws.cleanrooms#GenericResourceName", + "traits": { + "smithy.api#documentation": "

The name for the ID namespace association.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.cleanrooms#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the ID namespace association.

" + } + }, + "idMappingConfig": { + "target": "com.amazonaws.cleanrooms#IdMappingConfig", + "traits": { + "smithy.api#documentation": "

The configuration settings for the ID mapping table.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#CreateIdNamespaceAssociationOutput": { + "type": "structure", + "members": { + "idNamespaceAssociation": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociation", + "traits": { + "smithy.api#documentation": "

The ID namespace association that was created.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.cleanrooms#CreateMembership": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#CreateMembershipInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#CreateMembershipOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#ConflictException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to join collaborations by creating a membership", + "smithy.api#documentation": "

Creates a membership for a specific collaboration identifier and joins the\n collaboration.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/memberships" + } + } + }, + "com.amazonaws.cleanrooms#CreateMembershipInput": { + "type": "structure", + "members": { + "collaborationIdentifier": { + "target": "com.amazonaws.cleanrooms#CollaborationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique ID for the associated collaboration.

", + "smithy.api#required": {} + } + }, + "queryLogStatus": { + "target": "com.amazonaws.cleanrooms#MembershipQueryLogStatus", + "traits": { + "smithy.api#documentation": "

An indicator as to whether query logging has been enabled or disabled for the\n membership.

", + "smithy.api#required": {} + } + }, + "tags": { "target": "com.amazonaws.cleanrooms#TagMap", "traits": { "smithy.api#documentation": "

An optional label that you can assign to a resource when you create it. Each tag\n consists of a key and an optional value, both of which you define. When you use tagging,\n you can also use tag-based access control in IAM policies to control access\n to this resource.

" @@ -4472,13 +5325,13 @@ "defaultResultConfiguration": { "target": "com.amazonaws.cleanrooms#MembershipProtectedQueryResultConfiguration", "traits": { - "smithy.api#documentation": "

The default\n protected query result configuration as specified by the member who can receive\n results.

" + "smithy.api#documentation": "

The default protected query result configuration as specified by the member who can\n receive results.

" } }, "paymentConfiguration": { "target": "com.amazonaws.cleanrooms#MembershipPaymentConfiguration", "traits": { - "smithy.api#documentation": "

The payment\n responsibilities accepted by the collaboration member.

\n

Not required if the collaboration member has the member ability to run queries.

\n

Required if the collaboration member doesn't have the member ability to run queries but\n is configured as a payer by the collaboration creator.

" + "smithy.api#documentation": "

The payment responsibilities accepted by the collaboration member.

\n

Not required if the collaboration member has the member ability to run queries.

\n

Required if the collaboration member doesn't have the member ability to run queries but\n is configured as a payer by the collaboration creator.

" } } } @@ -4598,28 +5451,28 @@ "allowCleartext": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether encrypted tables can contain cleartext data\n (TRUE)\n or are to cryptographically process every column\n (FALSE).

", + "smithy.api#documentation": "

Indicates whether encrypted tables can contain cleartext data (TRUE) or are\n to cryptographically process every column (FALSE).

", "smithy.api#required": {} } }, "allowDuplicates": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether Fingerprint columns can contain duplicate entries\n (TRUE)\n or are to contain only non-repeated values\n (FALSE).

", + "smithy.api#documentation": "

Indicates whether Fingerprint columns can contain duplicate entries (TRUE)\n or are to contain only non-repeated values (FALSE).

", "smithy.api#required": {} } }, "allowJoinsOnColumnsWithDifferentNames": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether Fingerprint columns can be joined on any other Fingerprint column with\n a different name\n (TRUE)\n or can only be joined on Fingerprint columns of the same name\n (FALSE).

", + "smithy.api#documentation": "

Indicates whether Fingerprint columns can be joined on any other Fingerprint column with\n a different name (TRUE) or can only be joined on Fingerprint columns of the\n same name (FALSE).

", "smithy.api#required": {} } }, "preserveNulls": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether NULL values are to be copied as NULL to encrypted tables\n (TRUE)\n or cryptographically processed\n (FALSE).

", + "smithy.api#documentation": "

Indicates whether NULL values are to be copied as NULL to encrypted tables\n (TRUE) or cryptographically processed (FALSE).

", "smithy.api#required": {} } } @@ -4957,6 +5810,88 @@ "smithy.api#idempotent": {} } }, + "com.amazonaws.cleanrooms#DeleteConfiguredTableAssociationAnalysisRule": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#DeleteConfiguredTableAssociationAnalysisRuleInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#DeleteConfiguredTableAssociationAnalysisRuleOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#ConflictException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants permission to delete an existing configured table association analysis rule" + }, + "smithy.api#documentation": "

Deletes\n an analysis rule for a configured table association.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule/{analysisRuleType}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.cleanrooms#DeleteConfiguredTableAssociationAnalysisRuleInput": { + "type": "structure", + "members": { + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, + "configuredTableAssociationIdentifier": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

The\n identifier for the configured table association that's related to the analysis rule that you\n want to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "ConfiguredTableAssociationId" + } + }, + "analysisRuleType": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleType", + "traits": { + "smithy.api#documentation": "

The\n type of the analysis rule that you want to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#DeleteConfiguredTableAssociationAnalysisRuleOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.cleanrooms#DeleteConfiguredTableAssociationInput": { "type": "structure", "members": { @@ -5005,21 +5940,18 @@ "smithy.api#documentation": "

The empty output for a successful deletion.

" } }, - "com.amazonaws.cleanrooms#DeleteMember": { + "com.amazonaws.cleanrooms#DeleteIdMappingTable": { "type": "operation", "input": { - "target": "com.amazonaws.cleanrooms#DeleteMemberInput" + "target": "com.amazonaws.cleanrooms#DeleteIdMappingTableInput" }, "output": { - "target": "com.amazonaws.cleanrooms#DeleteMemberOutput" + "target": "com.amazonaws.cleanrooms#DeleteIdMappingTableOutput" }, "errors": [ { "target": "com.amazonaws.cleanrooms#AccessDeniedException" }, - { - "target": "com.amazonaws.cleanrooms#ConflictException" - }, { "target": "com.amazonaws.cleanrooms#InternalServerException" }, @@ -5034,12 +5966,153 @@ } ], "traits": { - "aws.iam#actionPermissionDescription": "Grants permission to delete members from a collaboration", - "smithy.api#documentation": "

Removes the specified member from a collaboration. The removed member is placed in the\n Removed status and can't interact with the collaboration. The removed member's data is\n inaccessible to active members of the collaboration.

", + "aws.iam#actionPermissionDescription": "Grants permission to remove an id mapping table from a collaboration", + "smithy.api#documentation": "

Deletes an ID mapping table.

", "smithy.api#http": { "code": 204, "method": "DELETE", - "uri": "/collaborations/{collaborationIdentifier}/member/{accountId}" + "uri": "/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.cleanrooms#DeleteIdMappingTableInput": { + "type": "structure", + "members": { + "idMappingTableIdentifier": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ID mapping table that you want to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdMappingTableId" + } + }, + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership that contains the ID mapping table that you want to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#DeleteIdMappingTableOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.cleanrooms#DeleteIdNamespaceAssociation": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#DeleteIdNamespaceAssociationInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#DeleteIdNamespaceAssociationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to remove an Id Namespace Association from a collaboration", + "smithy.api#documentation": "

Deletes an ID namespace association.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/memberships/{membershipIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.cleanrooms#DeleteIdNamespaceAssociationInput": { + "type": "structure", + "members": { + "idNamespaceAssociationIdentifier": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ID namespace association that you want to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdNamespaceAssociationId" + } + }, + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership that contains the ID namespace association that you want to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#DeleteIdNamespaceAssociationOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.cleanrooms#DeleteMember": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#DeleteMemberInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#DeleteMemberOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#ConflictException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to delete members from a collaboration", + "smithy.api#documentation": "

Removes the specified member from a collaboration. The removed member is placed in the\n Removed status and can't interact with the collaboration. The removed member's data is\n inaccessible to active members of the collaboration.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/collaborations/{collaborationIdentifier}/member/{accountId}" }, "smithy.api#idempotent": {} } @@ -5541,6 +6614,20 @@ "smithy.api#documentation": "

The epsilon and noise parameter values that you want to update in the differential privacy template.

" } }, + "com.amazonaws.cleanrooms#DirectAnalysisConfigurationDetails": { + "type": "structure", + "members": { + "receiverAccountIds": { + "target": "com.amazonaws.cleanrooms#ReceiverAccountIds", + "traits": { + "smithy.api#documentation": "

The account IDs for the member who received the results of a protected query.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The direct analysis configuration details.

" + } + }, "com.amazonaws.cleanrooms#DisplayName": { "type": "string", "traits": { @@ -5575,6 +6662,16 @@ ] } }, + "com.amazonaws.cleanrooms#GenericResourceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^(?!\\s*$)[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t]*$" + } + }, "com.amazonaws.cleanrooms#GetAnalysisTemplate": { "type": "operation", "input": { @@ -5827,6 +6924,83 @@ "smithy.api#output": {} } }, + "com.amazonaws.cleanrooms#GetCollaborationIdNamespaceAssociation": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#GetCollaborationIdNamespaceAssociationInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#GetCollaborationIdNamespaceAssociationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to get id namespace association within a collaboration", + "smithy.api#documentation": "

Retrieves an ID namespace association from a specific collaboration.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/collaborations/{collaborationIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.cleanrooms#GetCollaborationIdNamespaceAssociationInput": { + "type": "structure", + "members": { + "collaborationIdentifier": { + "target": "com.amazonaws.cleanrooms#CollaborationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the collaboration that contains the ID namespace association that you want to retrieve.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "CollaborationId" + } + }, + "idNamespaceAssociationIdentifier": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ID namespace association that you want to retrieve.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdNamespaceAssociationId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#GetCollaborationIdNamespaceAssociationOutput": { + "type": "structure", + "members": { + "collaborationIdNamespaceAssociation": { + "target": "com.amazonaws.cleanrooms#CollaborationIdNamespaceAssociation", + "traits": { + "smithy.api#documentation": "

The ID namespace association that you requested.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.cleanrooms#GetCollaborationInput": { "type": "structure", "members": { @@ -6149,34 +7323,121 @@ "smithy.api#readonly": {} } }, - "com.amazonaws.cleanrooms#GetConfiguredTableAssociationInput": { + "com.amazonaws.cleanrooms#GetConfiguredTableAssociationAnalysisRule": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#GetConfiguredTableAssociationAnalysisRuleInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#GetConfiguredTableAssociationAnalysisRuleOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants permission to get analysis rules for a configured table association" + }, + "smithy.api#documentation": "

Retrieves the analysis rule for a configured table association.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule/{analysisRuleType}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.cleanrooms#GetConfiguredTableAssociationAnalysisRuleInput": { "type": "structure", "members": { + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, "configuredTableAssociationIdentifier": { "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationIdentifier", "traits": { - "smithy.api#documentation": "

The unique ID for the configured table association to retrieve. Currently accepts the\n configured table ID.

", + "smithy.api#documentation": "

The identifier for the configured table association that's related to the analysis rule.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.api#resourceIdentifier": "ConfiguredTableAssociationId" } }, - "membershipIdentifier": { - "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "analysisRuleType": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleType", "traits": { - "smithy.api#documentation": "

A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID.

", + "smithy.api#documentation": "

The type of analysis rule that you want to retrieve.

", "smithy.api#httpLabel": {}, - "smithy.api#required": {}, - "smithy.api#resourceIdentifier": "MembershipId" + "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, - "com.amazonaws.cleanrooms#GetConfiguredTableAssociationOutput": { + "com.amazonaws.cleanrooms#GetConfiguredTableAssociationAnalysisRuleOutput": { "type": "structure", "members": { - "configuredTableAssociation": { - "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociation", + "analysisRule": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRule", + "traits": { + "smithy.api#documentation": "

The\n analysis rule for the configured table association. In the console, the\n ConfiguredTableAssociationAnalysisRule is referred to as the\n collaboration analysis rule.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.cleanrooms#GetConfiguredTableAssociationInput": { + "type": "structure", + "members": { + "configuredTableAssociationIdentifier": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique ID for the configured table association to retrieve. Currently accepts the\n configured table ID.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "ConfiguredTableAssociationId" + } + }, + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + } + } + }, + "com.amazonaws.cleanrooms#GetConfiguredTableAssociationOutput": { + "type": "structure", + "members": { + "configuredTableAssociation": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociation", "traits": { "smithy.api#documentation": "

The entire configured table association object.

", "smithy.api#required": {} @@ -6210,6 +7471,160 @@ } } }, + "com.amazonaws.cleanrooms#GetIdMappingTable": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#GetIdMappingTableInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#GetIdMappingTableOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to view details of an id mapping table", + "smithy.api#documentation": "

Retrieves an ID mapping table.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.cleanrooms#GetIdMappingTableInput": { + "type": "structure", + "members": { + "idMappingTableIdentifier": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ID mapping table that you want to retrieve.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdMappingTableId" + } + }, + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership that contains the ID mapping table that you want to retrieve.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#GetIdMappingTableOutput": { + "type": "structure", + "members": { + "idMappingTable": { + "target": "com.amazonaws.cleanrooms#IdMappingTable", + "traits": { + "smithy.api#documentation": "

The ID mapping table that you requested.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.cleanrooms#GetIdNamespaceAssociation": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#GetIdNamespaceAssociationInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#GetIdNamespaceAssociationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to view details of an id namespace association", + "smithy.api#documentation": "

Retrieves an ID namespace association.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/memberships/{membershipIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.cleanrooms#GetIdNamespaceAssociationInput": { + "type": "structure", + "members": { + "idNamespaceAssociationIdentifier": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ID namespace association that you want to retrieve.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdNamespaceAssociationId" + } + }, + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership that contains the ID namespace association that you want to retrieve.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#GetIdNamespaceAssociationOutput": { + "type": "structure", + "members": { + "idNamespaceAssociation": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociation", + "traits": { + "smithy.api#documentation": "

The ID namespace association that you requested.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.cleanrooms#GetMembership": { "type": "operation", "input": { @@ -6588,23 +8003,720 @@ "com.amazonaws.cleanrooms#GlueTableReference": { "type": "structure", "members": { - "tableName": { - "target": "com.amazonaws.cleanrooms#GlueTableName", + "tableName": { + "target": "com.amazonaws.cleanrooms#GlueTableName", + "traits": { + "smithy.api#documentation": "

The name of the Glue table.

", + "smithy.api#required": {} + } + }, + "databaseName": { + "target": "com.amazonaws.cleanrooms#GlueDatabaseName", + "traits": { + "smithy.api#documentation": "

The name of the database the Glue table belongs to.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A reference to a table within a Glue data catalog.

" + } + }, + "com.amazonaws.cleanrooms#IdMappingConfig": { + "type": "structure", + "members": { + "allowUseAsDimensionColumn": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

An indicator as to whether you can use your column as a dimension column in the ID mapping table (TRUE) or not (FALSE).

\n

Default is FALSE.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration settings for the ID mapping table.

" + } + }, + "com.amazonaws.cleanrooms#IdMappingTable": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ID mapping table.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdMappingTableId" + } + }, + "arn": { + "target": "com.amazonaws.cleanrooms#IdMappingTableArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ID mapping table.

", + "smithy.api#required": {} + } + }, + "inputReferenceConfig": { + "target": "com.amazonaws.cleanrooms#IdMappingTableInputReferenceConfig", + "traits": { + "smithy.api#documentation": "

The input reference configuration for the ID mapping table.

", + "smithy.api#required": {} + } + }, + "membershipId": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership resource for the ID mapping table.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, + "membershipArn": { + "target": "com.amazonaws.cleanrooms#MembershipArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the membership resource for the ID mapping table.

", + "smithy.api#required": {} + } + }, + "collaborationId": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the collaboration that contains this ID mapping table.

", + "smithy.api#required": {} + } + }, + "collaborationArn": { + "target": "com.amazonaws.cleanrooms#CollaborationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the collaboration that contains this ID mapping table.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.cleanrooms#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the ID mapping table.

" + } + }, + "name": { + "target": "com.amazonaws.cleanrooms#ResourceAlias", + "traits": { + "smithy.api#documentation": "

The name of the ID mapping table.

", + "smithy.api#required": {} + } + }, + "createTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time at which the ID mapping table was created.

", + "smithy.api#required": {} + } + }, + "updateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The most recent time at which the ID mapping table was updated.

", + "smithy.api#required": {} + } + }, + "inputReferenceProperties": { + "target": "com.amazonaws.cleanrooms#IdMappingTableInputReferenceProperties", + "traits": { + "smithy.api#documentation": "

The input reference properties for the ID mapping table.

", + "smithy.api#required": {} + } + }, + "kmsKeyArn": { + "target": "com.amazonaws.cleanrooms#KMSKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services KMS key.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes information about the ID mapping table.

" + } + }, + "com.amazonaws.cleanrooms#IdMappingTableArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 200 + }, + "smithy.api#pattern": "^arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/idmappingtable/[\\d\\w-]+$" + } + }, + "com.amazonaws.cleanrooms#IdMappingTableInputReferenceArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn):entityresolution:.*:[0-9]+:(idmappingworkflow/.*)$" + } + }, + "com.amazonaws.cleanrooms#IdMappingTableInputReferenceConfig": { + "type": "structure", + "members": { + "inputReferenceArn": { + "target": "com.amazonaws.cleanrooms#IdMappingTableInputReferenceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the referenced resource in Entity Resolution. Valid values are ID mapping workflow ARNs.

", + "smithy.api#required": {} + } + }, + "manageResourcePolicies": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

When TRUE, Clean Rooms manages permissions for the ID mapping table resource.

\n

When FALSE, the resource owner manages permissions for the ID mapping table resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides the input reference configuration for the ID mapping table.

" + } + }, + "com.amazonaws.cleanrooms#IdMappingTableInputReferenceProperties": { + "type": "structure", + "members": { + "idMappingTableInputSource": { + "target": "com.amazonaws.cleanrooms#IdMappingTableInputSourceList", + "traits": { + "smithy.api#documentation": "

The input source of the ID mapping table.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The input reference properties for the ID mapping table.

" + } + }, + "com.amazonaws.cleanrooms#IdMappingTableInputSource": { + "type": "structure", + "members": { + "idNamespaceAssociationId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ID namespace association.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.cleanrooms#IdNamespaceType", + "traits": { + "smithy.api#documentation": "

The type of the input source of the ID mapping table.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The input source of the ID mapping table.

" + } + }, + "com.amazonaws.cleanrooms#IdMappingTableInputSourceList": { + "type": "list", + "member": { + "target": "com.amazonaws.cleanrooms#IdMappingTableInputSource" + }, + "traits": { + "smithy.api#length": { + "min": 2, + "max": 2 + } + } + }, + "com.amazonaws.cleanrooms#IdMappingTableResource": { + "type": "resource", + "identifiers": { + "MembershipId": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier" + }, + "IdMappingTableId": { + "target": "com.amazonaws.cleanrooms#UUID" + } + }, + "create": { + "target": "com.amazonaws.cleanrooms#CreateIdMappingTable" + }, + "read": { + "target": "com.amazonaws.cleanrooms#GetIdMappingTable" + }, + "update": { + "target": "com.amazonaws.cleanrooms#UpdateIdMappingTable" + }, + "delete": { + "target": "com.amazonaws.cleanrooms#DeleteIdMappingTable" + }, + "list": { + "target": "com.amazonaws.cleanrooms#ListIdMappingTables" + }, + "operations": [ + { + "target": "com.amazonaws.cleanrooms#PopulateIdMappingTable" + } + ], + "traits": { + "aws.api#arn": { + "template": "membership/{MembershipId}/idmappingtable/{IdMappingTableId}" + }, + "aws.iam#disableConditionKeyInference": {}, + "aws.iam#iamResource": { + "name": "idmappingtable" + }, + "smithy.api#documentation": "Represents an Id Mapping Workflow associate with a collaboration" + } + }, + "com.amazonaws.cleanrooms#IdMappingTableSchemaTypeProperties": { + "type": "structure", + "members": { + "idMappingTableInputSource": { + "target": "com.amazonaws.cleanrooms#IdMappingTableInputSourceList", + "traits": { + "smithy.api#documentation": "

Defines which ID namespace associations are used to create the ID mapping table.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional properties that are specific to the type of the associated schema.

" + } + }, + "com.amazonaws.cleanrooms#IdMappingTableSummary": { + "type": "structure", + "members": { + "collaborationArn": { + "target": "com.amazonaws.cleanrooms#CollaborationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the collaboration that contains this ID mapping table.

", + "smithy.api#required": {} + } + }, + "collaborationId": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the collaboration that contains this ID mapping table.

", + "smithy.api#required": {} + } + }, + "membershipId": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership resource for this ID mapping table.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, + "membershipArn": { + "target": "com.amazonaws.cleanrooms#MembershipArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the membership resource for this ID mapping table.

", + "smithy.api#required": {} + } + }, + "createTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time at which this ID mapping table was created.

", + "smithy.api#required": {} + } + }, + "updateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The most recent time at which this ID mapping table was updated.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of this ID mapping table.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdMappingTableId" + } + }, + "arn": { + "target": "com.amazonaws.cleanrooms#IdMappingTableArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of this ID mapping table.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.cleanrooms#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of this ID mapping table.

" + } + }, + "inputReferenceConfig": { + "target": "com.amazonaws.cleanrooms#IdMappingTableInputReferenceConfig", + "traits": { + "smithy.api#documentation": "

The input reference configuration for the ID mapping table.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.cleanrooms#ResourceAlias", + "traits": { + "smithy.api#documentation": "

The name of this ID mapping table.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Detailed information about the ID mapping table.

" + } + }, + "com.amazonaws.cleanrooms#IdMappingTableSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.cleanrooms#IdMappingTableSummary" + } + }, + "com.amazonaws.cleanrooms#IdMappingWorkflowsSupported": { + "type": "list", + "member": { + "target": "smithy.api#Document" + } + }, + "com.amazonaws.cleanrooms#IdNamespaceAssociation": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier for this ID namespace association.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdNamespaceAssociationIdentifier" + } + }, + "arn": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ID namespace association.

", + "smithy.api#required": {} + } + }, + "membershipId": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership resource for this ID namespace association.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, + "membershipArn": { + "target": "com.amazonaws.cleanrooms#MembershipArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the membership resource for this ID namespace association.

", + "smithy.api#required": {} + } + }, + "collaborationId": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the collaboration that contains this ID namespace association.

", + "smithy.api#required": {} + } + }, + "collaborationArn": { + "target": "com.amazonaws.cleanrooms#CollaborationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the collaboration that contains this ID namespace association.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.cleanrooms#GenericResourceName", + "traits": { + "smithy.api#documentation": "

The name of this ID namespace association.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.cleanrooms#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the ID namespace association.

" + } + }, + "createTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time at which the ID namespace association was created.

", + "smithy.api#required": {} + } + }, + "updateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The most recent time at which the ID namespace association was updated.

", + "smithy.api#required": {} + } + }, + "inputReferenceConfig": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferenceConfig", + "traits": { + "smithy.api#documentation": "

The input reference configuration for the ID namespace association.

", + "smithy.api#required": {} + } + }, + "inputReferenceProperties": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferenceProperties", + "traits": { + "smithy.api#documentation": "

The input reference properties for the ID namespace association.

", + "smithy.api#required": {} + } + }, + "idMappingConfig": { + "target": "com.amazonaws.cleanrooms#IdMappingConfig", + "traits": { + "smithy.api#documentation": "

The configuration settings for the ID mapping table.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides information to create the ID namespace association.

" + } + }, + "com.amazonaws.cleanrooms#IdNamespaceAssociationArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/idnamespaceassociation/[\\d\\w-]+$" + } + }, + "com.amazonaws.cleanrooms#IdNamespaceAssociationIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + }, + "smithy.api#pattern": "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + } + }, + "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferenceArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^arn:aws:entityresolution:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:idnamespace/[\\d\\w-]+$" + } + }, + "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferenceConfig": { + "type": "structure", + "members": { + "inputReferenceArn": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferenceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Entity Resolution resource that is being associated to the collaboration. Valid resource ARNs are from the ID namespaces that you own.

", + "smithy.api#required": {} + } + }, + "manageResourcePolicies": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

When TRUE, Clean Rooms manages permissions for the ID namespace association resource.

\n

When FALSE, the resource owner manages permissions for the ID namespace association resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides the information for the ID namespace association input reference configuration.

" + } + }, + "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferenceProperties": { + "type": "structure", + "members": { + "idNamespaceType": { + "target": "com.amazonaws.cleanrooms#IdNamespaceType", + "traits": { + "smithy.api#documentation": "

The ID namespace type for this ID namespace association.

", + "smithy.api#required": {} + } + }, + "idMappingWorkflowsSupported": { + "target": "com.amazonaws.cleanrooms#IdMappingWorkflowsSupported", + "traits": { + "smithy.api#documentation": "

Defines how ID mapping workflows are supported for this ID namespace association.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides the information for the ID namespace association input reference properties.

" + } + }, + "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferencePropertiesSummary": { + "type": "structure", + "members": { + "idNamespaceType": { + "target": "com.amazonaws.cleanrooms#IdNamespaceType", + "traits": { + "smithy.api#documentation": "

The ID namespace type for this ID namespace association.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Detailed information about the ID namespace association input reference properties.

" + } + }, + "com.amazonaws.cleanrooms#IdNamespaceAssociationResource": { + "type": "resource", + "identifiers": { + "MembershipId": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier" + }, + "IdNamespaceAssociationId": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationIdentifier" + } + }, + "create": { + "target": "com.amazonaws.cleanrooms#CreateIdNamespaceAssociation" + }, + "read": { + "target": "com.amazonaws.cleanrooms#GetIdNamespaceAssociation" + }, + "update": { + "target": "com.amazonaws.cleanrooms#UpdateIdNamespaceAssociation" + }, + "delete": { + "target": "com.amazonaws.cleanrooms#DeleteIdNamespaceAssociation" + }, + "list": { + "target": "com.amazonaws.cleanrooms#ListIdNamespaceAssociations" + }, + "traits": { + "aws.api#arn": { + "template": "membership/{MembershipId}/idnamespaceassociation/{IdNamespaceAssociationId}" + }, + "aws.iam#disableConditionKeyInference": {}, + "aws.iam#iamResource": { + "name": "idnamespaceassociation" + }, + "smithy.api#documentation": "Represents an AWS Entity Resolution Id Namespace resource that has been associated with the collaboration for use in a entity resolution workflow as a data input" + } + }, + "com.amazonaws.cleanrooms#IdNamespaceAssociationSummary": { + "type": "structure", + "members": { + "membershipId": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership resource for this ID namespace association.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, + "membershipArn": { + "target": "com.amazonaws.cleanrooms#MembershipArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the membership resource for this ID namespace association.

", + "smithy.api#required": {} + } + }, + "collaborationArn": { + "target": "com.amazonaws.cleanrooms#CollaborationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the collaboration that contains this ID namespace association.

", + "smithy.api#required": {} + } + }, + "collaborationId": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the collaboration that contains this ID namespace association.

", + "smithy.api#required": {} + } + }, + "createTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time at which this ID namespace association was created.

", + "smithy.api#required": {} + } + }, + "updateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The most recent time at which this ID namespace association has been updated.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of this ID namespace association.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdNamespaceAssociationId" + } + }, + "arn": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of this ID namespace association.

", + "smithy.api#required": {} + } + }, + "inputReferenceConfig": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferenceConfig", + "traits": { + "smithy.api#documentation": "

The input reference configuration details for this ID namespace association.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.cleanrooms#GenericResourceName", + "traits": { + "smithy.api#documentation": "

The name of the ID namespace association.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.cleanrooms#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the ID namespace association.

" + } + }, + "inputReferenceProperties": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationInputReferencePropertiesSummary", + "traits": { + "smithy.api#documentation": "

The input reference properties for this ID namespace association.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Detailed information about the ID namespace association.

" + } + }, + "com.amazonaws.cleanrooms#IdNamespaceAssociationSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationSummary" + } + }, + "com.amazonaws.cleanrooms#IdNamespaceType": { + "type": "enum", + "members": { + "SOURCE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The name of the Glue table.

", - "smithy.api#required": {} + "smithy.api#enumValue": "SOURCE" } }, - "databaseName": { - "target": "com.amazonaws.cleanrooms#GlueDatabaseName", + "TARGET": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The name of the database the Glue table belongs to.

", - "smithy.api#required": {} + "smithy.api#enumValue": "TARGET" } } - }, - "traits": { - "smithy.api#documentation": "

A reference to a table within an Glue data catalog.

" } }, "com.amazonaws.cleanrooms#InternalServerException": { @@ -6657,6 +8769,16 @@ ] } }, + "com.amazonaws.cleanrooms#KMSKeyArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws:kms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:key/[a-zA-Z0-9-]+$" + } + }, "com.amazonaws.cleanrooms#KeyPrefix": { "type": "string", "traits": { @@ -6930,6 +9052,100 @@ "smithy.api#output": {} } }, + "com.amazonaws.cleanrooms#ListCollaborationIdNamespaceAssociations": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#ListCollaborationIdNamespaceAssociationsInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#ListCollaborationIdNamespaceAssociationsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list id namespace within a collaboration", + "smithy.api#documentation": "

Returns a list of the ID namespace associations in a collaboration.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/collaborations/{collaborationIdentifier}/idnamespaceassociations" + }, + "smithy.api#paginated": { + "items": "collaborationIdNamespaceAssociationSummaries", + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.cleanrooms#ListCollaborationIdNamespaceAssociationsInput": { + "type": "structure", + "members": { + "collaborationIdentifier": { + "target": "com.amazonaws.cleanrooms#CollaborationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the collaboration that contains the ID namespace associations that you want to retrieve.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "CollaborationId" + } + }, + "nextToken": { + "target": "com.amazonaws.cleanrooms#PaginationToken", + "traits": { + "smithy.api#documentation": "

The pagination token that's used to fetch the next set of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.cleanrooms#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#ListCollaborationIdNamespaceAssociationsOutput": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.cleanrooms#PaginationToken", + "traits": { + "smithy.api#documentation": "

The token value provided to access the next page of results.

" + } + }, + "collaborationIdNamespaceAssociationSummaries": { + "target": "com.amazonaws.cleanrooms#CollaborationIdNamespaceAssociationSummaryList", + "traits": { + "smithy.api#documentation": "

The summary information of the collaboration ID namespace associations that you requested.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.cleanrooms#ListCollaborationPrivacyBudgetTemplates": { "type": "operation", "input": { @@ -7302,10 +9518,167 @@ "com.amazonaws.cleanrooms#ListConfiguredTableAssociations": { "type": "operation", "input": { - "target": "com.amazonaws.cleanrooms#ListConfiguredTableAssociationsInput" + "target": "com.amazonaws.cleanrooms#ListConfiguredTableAssociationsInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#ListConfiguredTableAssociationsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list available configured table associations for a membership", + "smithy.api#documentation": "

Lists configured table associations for a membership.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/memberships/{membershipIdentifier}/configuredTableAssociations" + }, + "smithy.api#paginated": { + "items": "configuredTableAssociationSummaries" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.cleanrooms#ListConfiguredTableAssociationsInput": { + "type": "structure", + "members": { + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

A unique identifier for the membership to list configured table associations for.\n Currently accepts the membership ID.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.cleanrooms#PaginationToken", + "traits": { + "smithy.api#documentation": "

The token value retrieved from a previous call to access the next page of\n results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.cleanrooms#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum size of the results that is returned per call.

", + "smithy.api#httpQuery": "maxResults" + } + } + } + }, + "com.amazonaws.cleanrooms#ListConfiguredTableAssociationsOutput": { + "type": "structure", + "members": { + "configuredTableAssociationSummaries": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationSummaryList", + "traits": { + "smithy.api#documentation": "

The retrieved list of configured table associations.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.cleanrooms#PaginationToken", + "traits": { + "smithy.api#documentation": "

The token value retrieved from a previous call to access the next page of\n results.

" + } + } + } + }, + "com.amazonaws.cleanrooms#ListConfiguredTables": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#ListConfiguredTablesInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#ListConfiguredTablesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list available configured tables", + "smithy.api#documentation": "

Lists configured tables.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/configuredTables" + }, + "smithy.api#paginated": { + "items": "configuredTableSummaries" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.cleanrooms#ListConfiguredTablesInput": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.cleanrooms#PaginationToken", + "traits": { + "smithy.api#documentation": "

The token value retrieved from a previous call to access the next page of\n results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.cleanrooms#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum size of the results that is returned per call.

", + "smithy.api#httpQuery": "maxResults" + } + } + } + }, + "com.amazonaws.cleanrooms#ListConfiguredTablesOutput": { + "type": "structure", + "members": { + "configuredTableSummaries": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableSummaryList", + "traits": { + "smithy.api#documentation": "

The configured tables listed by the request.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.cleanrooms#PaginationToken", + "traits": { + "smithy.api#documentation": "

The token value retrieved from a previous call to access the next page of\n results.

" + } + } + } + }, + "com.amazonaws.cleanrooms#ListIdMappingTables": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#ListIdMappingTablesInput" }, "output": { - "target": "com.amazonaws.cleanrooms#ListConfiguredTableAssociationsOutput" + "target": "com.amazonaws.cleanrooms#ListIdMappingTablesOutput" }, "errors": [ { @@ -7325,26 +9698,29 @@ } ], "traits": { - "aws.iam#actionPermissionDescription": "Grants permission to list available configured table associations for a membership", - "smithy.api#documentation": "

Lists configured table associations for a membership.

", + "aws.iam#actionPermissionDescription": "Grants permission to list available id mapping tables for a membership", + "smithy.api#documentation": "

Returns a list of ID mapping tables.

", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/memberships/{membershipIdentifier}/configuredTableAssociations" + "uri": "/memberships/{membershipIdentifier}/idmappingtables" }, "smithy.api#paginated": { - "items": "configuredTableAssociationSummaries" + "items": "idMappingTableSummaries", + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" }, "smithy.api#readonly": {} } }, - "com.amazonaws.cleanrooms#ListConfiguredTableAssociationsInput": { + "com.amazonaws.cleanrooms#ListIdMappingTablesInput": { "type": "structure", "members": { "membershipIdentifier": { "target": "com.amazonaws.cleanrooms#MembershipIdentifier", "traits": { - "smithy.api#documentation": "

A unique identifier for the membership to list configured table associations for.\n Currently accepts the membership ID.

", + "smithy.api#documentation": "

The unique identifier of the membership that contains the ID mapping tables that you want to view.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7352,44 +9728,50 @@ "nextToken": { "target": "com.amazonaws.cleanrooms#PaginationToken", "traits": { - "smithy.api#documentation": "

The token value retrieved from a previous call to access the next page of\n results.

", + "smithy.api#documentation": "

The pagination token that's used to fetch the next set of results.

", "smithy.api#httpQuery": "nextToken" } }, "maxResults": { "target": "com.amazonaws.cleanrooms#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum size of the results that is returned per call.

", + "smithy.api#documentation": "

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", "smithy.api#httpQuery": "maxResults" } } + }, + "traits": { + "smithy.api#input": {} } }, - "com.amazonaws.cleanrooms#ListConfiguredTableAssociationsOutput": { + "com.amazonaws.cleanrooms#ListIdMappingTablesOutput": { "type": "structure", "members": { - "configuredTableAssociationSummaries": { - "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationSummaryList", + "idMappingTableSummaries": { + "target": "com.amazonaws.cleanrooms#IdMappingTableSummaryList", "traits": { - "smithy.api#documentation": "

The retrieved list of configured table associations.

", + "smithy.api#documentation": "

The summary information of the ID mapping tables that you requested.

", "smithy.api#required": {} } }, "nextToken": { "target": "com.amazonaws.cleanrooms#PaginationToken", "traits": { - "smithy.api#documentation": "

The token value retrieved from a previous call to access the next page of\n results.

" + "smithy.api#documentation": "

The token value provided to access the next page of results.

" } } + }, + "traits": { + "smithy.api#output": {} } }, - "com.amazonaws.cleanrooms#ListConfiguredTables": { + "com.amazonaws.cleanrooms#ListIdNamespaceAssociations": { "type": "operation", "input": { - "target": "com.amazonaws.cleanrooms#ListConfiguredTablesInput" + "target": "com.amazonaws.cleanrooms#ListIdNamespaceAssociationsInput" }, "output": { - "target": "com.amazonaws.cleanrooms#ListConfiguredTablesOutput" + "target": "com.amazonaws.cleanrooms#ListIdNamespaceAssociationsOutput" }, "errors": [ { @@ -7398,6 +9780,9 @@ { "target": "com.amazonaws.cleanrooms#InternalServerException" }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, { "target": "com.amazonaws.cleanrooms#ThrottlingException" }, @@ -7406,54 +9791,71 @@ } ], "traits": { - "aws.iam#actionPermissionDescription": "Grants permission to list available configured tables", - "smithy.api#documentation": "

Lists configured tables.

", + "aws.iam#actionPermissionDescription": "Grants permission to list entity resolution data associations for a membership", + "smithy.api#documentation": "

Returns a list of ID namespace associations.

", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/configuredTables" + "uri": "/memberships/{membershipIdentifier}/idnamespaceassociations" }, "smithy.api#paginated": { - "items": "configuredTableSummaries" + "items": "idNamespaceAssociationSummaries", + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" }, "smithy.api#readonly": {} } }, - "com.amazonaws.cleanrooms#ListConfiguredTablesInput": { + "com.amazonaws.cleanrooms#ListIdNamespaceAssociationsInput": { "type": "structure", "members": { + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership that contains the ID namespace association that you want to view.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, "nextToken": { "target": "com.amazonaws.cleanrooms#PaginationToken", "traits": { - "smithy.api#documentation": "

The token value retrieved from a previous call to access the next page of\n results.

", + "smithy.api#documentation": "

The pagination token that's used to fetch the next set of results.

", "smithy.api#httpQuery": "nextToken" } }, "maxResults": { "target": "com.amazonaws.cleanrooms#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum size of the results that is returned per call.

", + "smithy.api#documentation": "

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", "smithy.api#httpQuery": "maxResults" } } + }, + "traits": { + "smithy.api#input": {} } }, - "com.amazonaws.cleanrooms#ListConfiguredTablesOutput": { + "com.amazonaws.cleanrooms#ListIdNamespaceAssociationsOutput": { "type": "structure", "members": { - "configuredTableSummaries": { - "target": "com.amazonaws.cleanrooms#ConfiguredTableSummaryList", + "nextToken": { + "target": "com.amazonaws.cleanrooms#PaginationToken", "traits": { - "smithy.api#documentation": "

The configured tables listed by the request.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The token value provided to access the next page of results.

" } }, - "nextToken": { - "target": "com.amazonaws.cleanrooms#PaginationToken", + "idNamespaceAssociationSummaries": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationSummaryList", "traits": { - "smithy.api#documentation": "

The token value retrieved from a previous call to access the next page of\n results.

" + "smithy.api#documentation": "

The summary information of the ID namespace associations that you requested.

", + "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.cleanrooms#ListMembers": { @@ -8131,7 +10533,7 @@ "paymentConfiguration": { "target": "com.amazonaws.cleanrooms#PaymentConfiguration", "traits": { - "smithy.api#documentation": "

The collaboration\n member's payment responsibilities set by the collaboration creator.\n

\n

If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer.

" + "smithy.api#documentation": "

The collaboration member's payment responsibilities set by the collaboration creator.

\n

If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer.

" } } }, @@ -8176,7 +10578,7 @@ "status": { "target": "com.amazonaws.cleanrooms#MemberStatus", "traits": { - "smithy.api#documentation": "

The status of the member.\n

", + "smithy.api#documentation": "

The status of the member.

", "smithy.api#required": {} } }, @@ -8223,7 +10625,7 @@ "paymentConfiguration": { "target": "com.amazonaws.cleanrooms#PaymentConfiguration", "traits": { - "smithy.api#documentation": "

The collaboration\n member's payment responsibilities set by the collaboration creator.\n

", + "smithy.api#documentation": "

The collaboration member's payment responsibilities set by the collaboration creator.

", "smithy.api#required": {} } } @@ -8308,7 +10710,7 @@ "status": { "target": "com.amazonaws.cleanrooms#MembershipStatus", "traits": { - "smithy.api#documentation": "

The status of the\n membership.

", + "smithy.api#documentation": "

The status of the membership.

", "smithy.api#required": {} } }, @@ -8335,7 +10737,7 @@ "paymentConfiguration": { "target": "com.amazonaws.cleanrooms#MembershipPaymentConfiguration", "traits": { - "smithy.api#documentation": "

The payment\n responsibilities accepted by the collaboration member.

", + "smithy.api#documentation": "

The payment responsibilities accepted by the collaboration member.

", "smithy.api#required": {} } } @@ -8369,13 +10771,13 @@ "queryCompute": { "target": "com.amazonaws.cleanrooms#MembershipQueryComputePaymentConfig", "traits": { - "smithy.api#documentation": "

The payment\n responsibilities accepted by the collaboration member for query compute\n costs.

", + "smithy.api#documentation": "

The payment responsibilities accepted by the collaboration member for query compute\n costs.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

An object\n representing the payment responsibilities accepted by the collaboration\n member.

" + "smithy.api#documentation": "

An object representing the payment responsibilities accepted by the collaboration\n member.

" } }, "com.amazonaws.cleanrooms#MembershipProtectedQueryOutputConfiguration": { @@ -8416,13 +10818,13 @@ "isResponsible": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether\n the collaboration member has accepted to pay for query compute costs (TRUE) or\n has not accepted to pay for query compute costs\n (FALSE).

\n

If the collaboration creator has not specified anyone to pay for query compute costs,\n then the member who can query is the default payer.

\n

An error message is returned for the following reasons:

\n
    \n
  • \n

    If you set the value to FALSE but you are responsible to pay for\n query compute costs.

    \n
  • \n
  • \n

    If you set the value to TRUE but you are not responsible to pay for\n query compute costs.

    \n
  • \n
", + "smithy.api#documentation": "

Indicates whether the collaboration member has accepted to pay for query compute costs\n (TRUE) or has not accepted to pay for query compute costs\n (FALSE).

\n

If the collaboration creator has not specified anyone to pay for query compute costs,\n then the member who can query is the default payer.

\n

An error message is returned for the following reasons:

\n
    \n
  • \n

    If you set the value to FALSE but you are responsible to pay for\n query compute costs.

    \n
  • \n
  • \n

    If you set the value to TRUE but you are not responsible to pay for\n query compute costs.

    \n
  • \n
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

An object\n representing the payment responsibilities accepted by the collaboration member for query\n compute costs.

" + "smithy.api#documentation": "

An object representing the payment responsibilities accepted by the collaboration member\n for query compute costs.

" } }, "com.amazonaws.cleanrooms#MembershipQueryLogStatus": { @@ -8582,7 +10984,7 @@ "status": { "target": "com.amazonaws.cleanrooms#MembershipStatus", "traits": { - "smithy.api#documentation": "

The status of the\n membership.

", + "smithy.api#documentation": "

The status of the membership.

", "smithy.api#required": {} } }, @@ -8596,7 +10998,7 @@ "paymentConfiguration": { "target": "com.amazonaws.cleanrooms#MembershipPaymentConfiguration", "traits": { - "smithy.api#documentation": "

The payment\n responsibilities accepted by the collaboration member.

", + "smithy.api#documentation": "

The payment responsibilities accepted by the collaboration member.

", "smithy.api#required": {} } } @@ -8748,13 +11150,92 @@ "queryCompute": { "target": "com.amazonaws.cleanrooms#QueryComputePaymentConfig", "traits": { - "smithy.api#documentation": "

The collaboration\n member's payment responsibilities set by the collaboration creator for query compute\n costs.

", + "smithy.api#documentation": "

The collaboration member's payment responsibilities set by the collaboration creator for\n query compute costs.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An object representing the collaboration member's payment responsibilities set by the\n collaboration creator.

" + } + }, + "com.amazonaws.cleanrooms#PopulateIdMappingTable": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#PopulateIdMappingTableInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#PopulateIdMappingTableOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#ConflictException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to link an id mapping workflow with a collaboration by creating a new table", + "smithy.api#documentation": "

Defines the information that's necessary to populate an ID mapping table.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}/populate" + } + } + }, + "com.amazonaws.cleanrooms#PopulateIdMappingTableInput": { + "type": "structure", + "members": { + "idMappingTableIdentifier": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ID mapping table that you want to populate.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdMappingTableId" + } + }, + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership that contains the ID mapping table that you want to populate.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#PopulateIdMappingTableOutput": { + "type": "structure", + "members": { + "idMappingJobId": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the mapping job that will populate the ID mapping table.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

An object\n representing the collaboration member's payment responsibilities set by the collaboration\n creator.

" + "smithy.api#output": {} } }, "com.amazonaws.cleanrooms#PreviewPrivacyImpact": { @@ -9026,7 +11507,7 @@ "parameters": { "target": "com.amazonaws.cleanrooms#PrivacyBudgetTemplateParametersOutput", "traits": { - "smithy.api#documentation": "

Specifies the epislon and noise parameters for the privacy budget template.

", + "smithy.api#documentation": "

Specifies the\n epsilon\n and noise parameters for the privacy budget template.

", "smithy.api#required": {} } } @@ -9363,6 +11844,21 @@ "smithy.api#pattern": "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" } }, + "com.amazonaws.cleanrooms#ProtectedQueryMemberOutputConfiguration": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.cleanrooms#AccountId", + "traits": { + "smithy.api#documentation": "

The\n unique identifier for the account.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configuration details for the protected query member output.

" + } + }, "com.amazonaws.cleanrooms#ProtectedQueryMemberOutputList": { "type": "list", "member": { @@ -9395,7 +11891,13 @@ "s3": { "target": "com.amazonaws.cleanrooms#ProtectedQueryS3OutputConfiguration", "traits": { - "smithy.api#documentation": "

Required configuration for a protected query with an `S3` output type.

" + "smithy.api#documentation": "

Required configuration for a protected query with an\n s3\n output type.

" + } + }, + "member": { + "target": "com.amazonaws.cleanrooms#ProtectedQueryMemberOutputConfiguration", + "traits": { + "smithy.api#documentation": "

Required configuration for a protected query with a member output type.

" } } }, @@ -9489,7 +11991,7 @@ "traits": { "smithy.api#documentation": "

The query string to be submitted.

", "smithy.api#length": { - "max": 90000 + "max": 500000 } } }, @@ -9532,7 +12034,7 @@ "totalDurationInMillis": { "target": "smithy.api#Long", "traits": { - "smithy.api#documentation": "

The duration of the Protected Query, from creation until query completion.

" + "smithy.api#documentation": "

The duration of the protected query, from creation until query completion.

" } } }, @@ -9613,6 +12115,14 @@ "smithy.api#documentation": "

The status of the protected query. Value values are `SUBMITTED`, `STARTED`, `CANCELLED`,\n `CANCELLING`, `FAILED`, `SUCCESS`, `TIMED_OUT`.

", "smithy.api#required": {} } + }, + "receiverConfigurations": { + "target": "com.amazonaws.cleanrooms#ReceiverConfigurationsList", + "traits": { + "smithy.api#default": [], + "smithy.api#documentation": "

The receiver configuration.

", + "smithy.api#required": {} + } } }, "traits": { @@ -9642,13 +12152,52 @@ "isResponsible": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether\n the collaboration creator has configured the collaboration member to pay for query compute\n costs (TRUE) or has not configured the collaboration member to pay for query\n compute costs (FALSE).

\n

Exactly one member can be configured to pay for query compute costs. An error is\n returned if the collaboration creator sets a TRUE value for more than one\n member in the collaboration.

\n

If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer. An error is returned if\n the collaboration creator sets a FALSE value for the member who can\n query.

", + "smithy.api#documentation": "

Indicates whether the collaboration creator has configured the collaboration member to\n pay for query compute costs (TRUE) or has not configured the collaboration\n member to pay for query compute costs (FALSE).

\n

Exactly one member can be configured to pay for query compute costs. An error is\n returned if the collaboration creator sets a TRUE value for more than one\n member in the collaboration.

\n

If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer. An error is returned if\n the collaboration creator sets a FALSE value for the member who can\n query.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

An object\n representing the collaboration member's payment responsibilities set by the collaboration\n creator for query compute costs.

" + "smithy.api#documentation": "

An object representing the collaboration member's payment responsibilities set by the\n collaboration creator for query compute costs.

" + } + }, + "com.amazonaws.cleanrooms#QueryConstraint": { + "type": "union", + "members": { + "requireOverlap": { + "target": "com.amazonaws.cleanrooms#QueryConstraintRequireOverlap", + "traits": { + "smithy.api#documentation": "

An array of column names that specifies which columns are required in the JOIN statement.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides any necessary query constraint information.

" + } + }, + "com.amazonaws.cleanrooms#QueryConstraintList": { + "type": "list", + "member": { + "target": "com.amazonaws.cleanrooms#QueryConstraint" + }, + "traits": { + "smithy.api#length": { + "max": 1 + } + } + }, + "com.amazonaws.cleanrooms#QueryConstraintRequireOverlap": { + "type": "structure", + "members": { + "columns": { + "target": "com.amazonaws.cleanrooms#AnalysisRuleColumnList", + "traits": { + "smithy.api#documentation": "

The columns that are required to overlap.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides the name of the columns that are required to overlap.

" } }, "com.amazonaws.cleanrooms#QueryTables": { @@ -9657,6 +12206,39 @@ "target": "com.amazonaws.cleanrooms#TableAlias" } }, + "com.amazonaws.cleanrooms#ReceiverAccountIds": { + "type": "list", + "member": { + "target": "com.amazonaws.cleanrooms#AccountId" + } + }, + "com.amazonaws.cleanrooms#ReceiverConfiguration": { + "type": "structure", + "members": { + "analysisType": { + "target": "com.amazonaws.cleanrooms#AnalysisType", + "traits": { + "smithy.api#documentation": "

The type of analysis for the protected query. The results of the query can be analyzed directly (DIRECT_ANALYSIS) or used as input into additional analyses (ADDITIONAL_ANALYSIS), such as a query that is a seed for a lookalike ML model.

", + "smithy.api#required": {} + } + }, + "configurationDetails": { + "target": "com.amazonaws.cleanrooms#ConfigurationDetails", + "traits": { + "smithy.api#documentation": "

The configuration details of the receiver configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The receiver configuration for a protected query.

" + } + }, + "com.amazonaws.cleanrooms#ReceiverConfigurationsList": { + "type": "list", + "member": { + "target": "com.amazonaws.cleanrooms#ReceiverConfiguration" + } + }, "com.amazonaws.cleanrooms#ResourceAlias": { "type": "string", "traits": { @@ -9955,6 +12537,12 @@ "smithy.api#documentation": "

Details about the status of the schema. Currently, only one entry is present.

", "smithy.api#required": {} } + }, + "schemaTypeProperties": { + "target": "com.amazonaws.cleanrooms#SchemaTypeProperties", + "traits": { + "smithy.api#documentation": "

The schema type properties.

" + } } }, "traits": { @@ -10057,7 +12645,7 @@ "status": { "target": "com.amazonaws.cleanrooms#SchemaStatus", "traits": { - "smithy.api#documentation": "

The status of the schema.

", + "smithy.api#documentation": "

The status of the schema, indicating if it is ready to query.

", "smithy.api#required": {} } }, @@ -10078,6 +12666,14 @@ "traits": { "smithy.api#documentation": "

The configuration details of the schema analysis rule for the given type.

" } + }, + "analysisType": { + "target": "com.amazonaws.cleanrooms#AnalysisType", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

The type of analysis that can be performed on the schema.

\n

A schema can have an analysisType of DIRECT_ANALYSIS, ADDITIONAL_ANALYSIS_FOR_AUDIENCE_GENERATION, or both.

", + "smithy.api#required": {} + } } }, "traits": { @@ -10100,43 +12696,85 @@ "smithy.api#required": {} } }, - "message": { - "target": "smithy.api#String", + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

An explanation of the schema status reason code.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A reason why the schema status is set to its current value.

" + } + }, + "com.amazonaws.cleanrooms#SchemaStatusReasonCode": { + "type": "enum", + "members": { + "ANALYSIS_RULE_MISSING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ANALYSIS_RULE_MISSING" + } + }, + "ANALYSIS_TEMPLATES_NOT_CONFIGURED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ANALYSIS_TEMPLATES_NOT_CONFIGURED" + } + }, + "ANALYSIS_PROVIDERS_NOT_CONFIGURED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ANALYSIS_PROVIDERS_NOT_CONFIGURED" + } + }, + "DIFFERENTIAL_PRIVACY_POLICY_NOT_CONFIGURED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DIFFERENTIAL_PRIVACY_POLICY_NOT_CONFIGURED" + } + }, + "ID_MAPPING_TABLE_NOT_POPULATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ID_MAPPING_TABLE_NOT_POPULATED" + } + }, + "COLLABORATION_ANALYSIS_RULE_NOT_CONFIGURED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COLLABORATION_ANALYSIS_RULE_NOT_CONFIGURED" + } + }, + "ADDITIONAL_ANALYSES_NOT_CONFIGURED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An explanation of the schema status reason code.

", - "smithy.api#required": {} + "smithy.api#enumValue": "ADDITIONAL_ANALYSES_NOT_CONFIGURED" } - } - }, - "traits": { - "smithy.api#documentation": "

A reason why the schema status is set to its current value.

" - } - }, - "com.amazonaws.cleanrooms#SchemaStatusReasonCode": { - "type": "enum", - "members": { - "ANALYSIS_RULE_MISSING": { + }, + "RESULT_RECEIVERS_NOT_CONFIGURED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "ANALYSIS_RULE_MISSING" + "smithy.api#enumValue": "RESULT_RECEIVERS_NOT_CONFIGURED" } }, - "ANALYSIS_TEMPLATES_NOT_CONFIGURED": { + "ADDITIONAL_ANALYSES_NOT_ALLOWED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "ANALYSIS_TEMPLATES_NOT_CONFIGURED" + "smithy.api#enumValue": "ADDITIONAL_ANALYSES_NOT_ALLOWED" } }, - "ANALYSIS_PROVIDERS_NOT_CONFIGURED": { + "RESULT_RECEIVERS_NOT_ALLOWED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "ANALYSIS_PROVIDERS_NOT_CONFIGURED" + "smithy.api#enumValue": "RESULT_RECEIVERS_NOT_ALLOWED" } }, - "DIFFERENTIAL_PRIVACY_POLICY_NOT_CONFIGURED": { + "ANALYSIS_RULE_TYPES_NOT_COMPATIBLE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "DIFFERENTIAL_PRIVACY_POLICY_NOT_CONFIGURED" + "smithy.api#enumValue": "ANALYSIS_RULE_TYPES_NOT_COMPATIBLE" } } } @@ -10232,7 +12870,27 @@ "traits": { "smithy.api#enumValue": "TABLE" } + }, + "ID_MAPPING_TABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ID_MAPPING_TABLE" + } + } + } + }, + "com.amazonaws.cleanrooms#SchemaTypeProperties": { + "type": "union", + "members": { + "idMappingTable": { + "target": "com.amazonaws.cleanrooms#IdMappingTableSchemaTypeProperties", + "traits": { + "smithy.api#documentation": "

The ID mapping table for the schema type properties.

" + } } + }, + "traits": { + "smithy.api#documentation": "

Information about the schema type properties.

" } }, "com.amazonaws.cleanrooms#ServiceQuotaExceededException": { @@ -10977,6 +13635,102 @@ } } }, + "com.amazonaws.cleanrooms#UpdateConfiguredTableAssociationAnalysisRule": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#UpdateConfiguredTableAssociationAnalysisRuleInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#UpdateConfiguredTableAssociationAnalysisRuleOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#ConflictException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants permission to update analysis rules for a configured table association" + }, + "smithy.api#documentation": "

Updates the analysis rule for a configured table association.

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule/{analysisRuleType}" + } + } + }, + "com.amazonaws.cleanrooms#UpdateConfiguredTableAssociationAnalysisRuleInput": { + "type": "structure", + "members": { + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, + "configuredTableAssociationIdentifier": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier for the configured table association to update.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "ConfiguredTableAssociationId" + } + }, + "analysisRuleType": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRuleType", + "traits": { + "smithy.api#documentation": "

The analysis rule type that you want to update.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "analysisRulePolicy": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRulePolicy", + "traits": { + "smithy.api#documentation": "

The updated analysis rule policy for the configured table association.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#UpdateConfiguredTableAssociationAnalysisRuleOutput": { + "type": "structure", + "members": { + "analysisRule": { + "target": "com.amazonaws.cleanrooms#ConfiguredTableAssociationAnalysisRule", + "traits": { + "smithy.api#documentation": "

The updated analysis rule for the configured table association. In the console, the ConfiguredTableAssociationAnalysisRule is referred to as the collaboration analysis rule.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.cleanrooms#UpdateConfiguredTableAssociationInput": { "type": "structure", "members": { @@ -11062,6 +13816,188 @@ } } }, + "com.amazonaws.cleanrooms#UpdateIdMappingTable": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#UpdateIdMappingTableInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#UpdateIdMappingTableOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to update an id mapping table", + "smithy.api#documentation": "

Provides the details that are necessary to update an ID mapping table.

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}" + } + } + }, + "com.amazonaws.cleanrooms#UpdateIdMappingTableInput": { + "type": "structure", + "members": { + "idMappingTableIdentifier": { + "target": "com.amazonaws.cleanrooms#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ID mapping table that you want to update.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdMappingTableId" + } + }, + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership that contains the ID mapping table that you want to update.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, + "description": { + "target": "com.amazonaws.cleanrooms#ResourceDescription", + "traits": { + "smithy.api#documentation": "

A new description for the ID mapping table.

" + } + }, + "kmsKeyArn": { + "target": "com.amazonaws.cleanrooms#KMSKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services KMS key.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#UpdateIdMappingTableOutput": { + "type": "structure", + "members": { + "idMappingTable": { + "target": "com.amazonaws.cleanrooms#IdMappingTable", + "traits": { + "smithy.api#documentation": "

The updated ID mapping table.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.cleanrooms#UpdateIdNamespaceAssociation": { + "type": "operation", + "input": { + "target": "com.amazonaws.cleanrooms#UpdateIdNamespaceAssociationInput" + }, + "output": { + "target": "com.amazonaws.cleanrooms#UpdateIdNamespaceAssociationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.cleanrooms#AccessDeniedException" + }, + { + "target": "com.amazonaws.cleanrooms#InternalServerException" + }, + { + "target": "com.amazonaws.cleanrooms#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.cleanrooms#ThrottlingException" + }, + { + "target": "com.amazonaws.cleanrooms#ValidationException" + } + ], + "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to update a entity resolution input association", + "smithy.api#documentation": "

Provides the details that are necessary to update an ID namespace association.

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/memberships/{membershipIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}" + } + } + }, + "com.amazonaws.cleanrooms#UpdateIdNamespaceAssociationInput": { + "type": "structure", + "members": { + "idNamespaceAssociationIdentifier": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ID namespace association that you want to update.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IdNamespaceAssociationId" + } + }, + "membershipIdentifier": { + "target": "com.amazonaws.cleanrooms#MembershipIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the membership that contains the ID namespace association that you want to update.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "MembershipId" + } + }, + "name": { + "target": "com.amazonaws.cleanrooms#GenericResourceName", + "traits": { + "smithy.api#documentation": "

A new name for the ID namespace association.

" + } + }, + "description": { + "target": "com.amazonaws.cleanrooms#ResourceDescription", + "traits": { + "smithy.api#documentation": "

A new description for the ID namespace association.

" + } + }, + "idMappingConfig": { + "target": "com.amazonaws.cleanrooms#IdMappingConfig", + "traits": { + "smithy.api#documentation": "

The configuration settings for the ID mapping table.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cleanrooms#UpdateIdNamespaceAssociationOutput": { + "type": "structure", + "members": { + "idNamespaceAssociation": { + "target": "com.amazonaws.cleanrooms#IdNamespaceAssociation", + "traits": { + "smithy.api#documentation": "

The updated ID namespace association.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.cleanrooms#UpdateMembership": { "type": "operation", "input": { diff --git a/models/cleanroomsml.json b/models/cleanroomsml.json index 9c87536012..aba430385f 100644 --- a/models/cleanroomsml.json +++ b/models/cleanroomsml.json @@ -755,6 +755,16 @@ "smithy.api#pattern": "^[0-9]{12}$" } }, + "com.amazonaws.cleanroomsml#AnalysisTemplateArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + }, + "smithy.api#pattern": "^arn:aws[-a-z]*:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/analysistemplate/[\\d\\w-]+$" + } + }, "com.amazonaws.cleanroomsml#AudienceDestination": { "type": "structure", "members": { @@ -951,16 +961,21 @@ "dataSource": { "target": "com.amazonaws.cleanroomsml#S3ConfigMap", "traits": { - "smithy.api#documentation": "

Defines the Amazon S3 bucket where the seed audience for the generating audience is stored. A valid data source is a JSON line file in the following format:

\n

\n {\"user_id\": \"111111\"}\n

\n

\n {\"user_id\": \"222222\"}\n

\n

\n ...\n

", - "smithy.api#required": {} + "smithy.api#documentation": "

Defines the Amazon S3 bucket where the seed audience for the generating audience is stored. A valid data source is a JSON line file in the following format:

\n

\n {\"user_id\": \"111111\"}\n

\n

\n {\"user_id\": \"222222\"}\n

\n

\n ...\n

" } }, "roleArn": { "target": "com.amazonaws.cleanroomsml#IamRoleArn", "traits": { - "smithy.api#documentation": "

The ARN of the IAM role that can read the Amazon S3 bucket where the training data is stored.

", + "smithy.api#documentation": "

The ARN of the IAM role that can read the Amazon S3 bucket where the seed audience is stored.

", "smithy.api#required": {} } + }, + "sqlParameters": { + "target": "com.amazonaws.cleanroomsml#ProtectedQuerySQLParameters", + "traits": { + "smithy.api#documentation": "

The protected SQL query parameters.

" + } } }, "traits": { @@ -1316,7 +1331,7 @@ } }, "traits": { - "smithy.api#documentation": "

Configure the list of audience output sizes that can be created. A request to StartAudienceGenerationJob that uses this configured audience model must have an audienceSize selected from this list. You can use the ABSOLUTE\n AudienceSize to configure out audience sizes using the count of identifiers in the output. You can use the Percentage\n AudienceSize to configure sizes in the range 1-100 percent.

" + "smithy.api#documentation": "

Returns the relevance scores at these audience sizes when used in the GetAudienceGenerationJob for a specified audience generation job and configured audience model.

\n

Specifies the list of allowed audienceSize values when used in the StartAudienceExportJob for an audience generation job. You can use the ABSOLUTE\n AudienceSize to configure output audience sizes using the count of identifiers in the output. You can use the Percentage\n AudienceSize to configure sizes in the range 1-100 percent.

" } }, "com.amazonaws.cleanroomsml#AudienceSizeType": { @@ -2380,6 +2395,12 @@ "traits": { "smithy.api#documentation": "

The tags that are associated to this audience generation job.

" } + }, + "protectedQueryIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the protected query for this audience generation job.

" + } } }, "traits": { @@ -3440,6 +3461,34 @@ } } }, + "com.amazonaws.cleanroomsml#ParameterKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[0-9a-zA-Z_]+$" + } + }, + "com.amazonaws.cleanroomsml#ParameterMap": { + "type": "map", + "key": { + "target": "com.amazonaws.cleanroomsml#ParameterKey" + }, + "value": { + "target": "com.amazonaws.cleanroomsml#ParameterValue" + } + }, + "com.amazonaws.cleanroomsml#ParameterValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 250 + } + } + }, "com.amazonaws.cleanroomsml#PolicyExistenceCondition": { "type": "enum", "members": { @@ -3457,6 +3506,36 @@ } } }, + "com.amazonaws.cleanroomsml#ProtectedQuerySQLParameters": { + "type": "structure", + "members": { + "queryString": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The query string to be submitted.

", + "smithy.api#length": { + "max": 90000 + } + } + }, + "analysisTemplateArn": { + "target": "com.amazonaws.cleanroomsml#AnalysisTemplateArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) associated with the analysis template within a\n collaboration.

" + } + }, + "parameters": { + "target": "com.amazonaws.cleanroomsml#ParameterMap", + "traits": { + "smithy.api#documentation": "

The protected query SQL parameters.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The parameters for the SQL type Protected Query.

", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.cleanroomsml#PutConfiguredAudienceModelPolicy": { "type": "operation", "input": { diff --git a/models/cloudwatch-logs.json b/models/cloudwatch-logs.json index 4eaf79a584..27633f61b9 100644 --- a/models/cloudwatch-logs.json +++ b/models/cloudwatch-logs.json @@ -2325,7 +2325,20 @@ "outputToken": "nextToken", "items": "logGroups", "pageSize": "limit" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeLogGroupsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.cloudwatchlogs#DescribeLogGroupsRequest": { @@ -3946,7 +3959,23 @@ "outputToken": "nextForwardToken", "items": "events", "pageSize": "limit" - } + }, + "smithy.test#smokeTests": [ + { + "id": "GetLogEventsFailure", + "params": { + "logGroupName": "fakegroup", + "logStreamName": "fakestream" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.cloudwatchlogs#GetLogEventsRequest": { @@ -5328,7 +5357,7 @@ "sdkId": "CloudWatch Logs", "arnNamespace": "logs", "cloudFormationName": "Logs", - "cloudTrailEventSource": "cloudwatchlogs.amazonaws.com", + "cloudTrailEventSource": "logs.amazonaws.com", "docId": "logs-2014-03-28", "endpointPrefix": "logs" }, diff --git a/models/cloudwatch.json b/models/cloudwatch.json index 55af845550..5d98c3b992 100644 --- a/models/cloudwatch.json +++ b/models/cloudwatch.json @@ -2594,7 +2594,7 @@ "sdkId": "CloudWatch", "arnNamespace": "monitoring", "cloudFormationName": "CloudWatch", - "cloudTrailEventSource": "cloudwatch.amazonaws.com", + "cloudTrailEventSource": "monitoring.amazonaws.com", "docId": "monitoring-2010-08-01", "endpointPrefix": "monitoring" }, @@ -4360,7 +4360,22 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken" - } + }, + 
"smithy.test#smokeTests": [ + { + "id": "ListMetricsSuccess", + "params": { + "Namespace": "AWS/EC2" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.cloudwatch#ListMetricsInput": { diff --git a/models/codebuild.json b/models/codebuild.json index 5b1dcc7ce6..d9230e92cf 100644 --- a/models/codebuild.json +++ b/models/codebuild.json @@ -124,6 +124,12 @@ "traits": { "smithy.api#enumValue": "CODECONNECTIONS" } + }, + "SECRETS_MANAGER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SECRETS_MANAGER" + } } } }, @@ -4551,7 +4557,7 @@ } ], "traits": { - "smithy.api#documentation": "

Imports the source repository credentials for an CodeBuild project that has its\n source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.

" + "smithy.api#documentation": "

Imports the source repository credentials for a CodeBuild project that has its\n source code stored in a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket repository.

" } }, "com.amazonaws.codebuild#ImportSourceCredentialsInput": { @@ -4566,7 +4572,7 @@ "token": { "target": "com.amazonaws.codebuild#SensitiveNonEmptyString", "traits": { - "smithy.api#documentation": "

For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket,\n this is either the access token or the app password. For the authType CODECONNECTIONS, \n this is the connectionArn.

", + "smithy.api#documentation": "

For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket,\n this is either the access token or the app password. For the authType CODECONNECTIONS, \n this is the connectionArn. For the authType SECRETS_MANAGER, this is the secretArn.

", "smithy.api#required": {} } }, @@ -4580,7 +4586,7 @@ "authType": { "target": "com.amazonaws.codebuild#AuthType", "traits": { - "smithy.api#documentation": "

The type of authentication used to connect to a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or\n Bitbucket repository. An OAUTH connection is not supported by the API and must be\n created using the CodeBuild console. Note that CODECONNECTIONS is only valid for \n GitLab and GitLab Self Managed.

", + "smithy.api#documentation": "

The type of authentication used to connect to a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or\n Bitbucket repository. An OAUTH connection is not supported by the API and must be\n created using the CodeBuild console.

", "smithy.api#required": {} } }, @@ -6437,7 +6443,7 @@ "auth": { "target": "com.amazonaws.codebuild#SourceAuth", "traits": { - "smithy.api#documentation": "

Information about the authorization settings for CodeBuild to access the source code to be\n built.

\n

This information is for the CodeBuild console's use only. Your code should not get or set\n this information directly.

" + "smithy.api#documentation": "

Information about the authorization settings for CodeBuild to access the source code to be\n built.

" } }, "reportBuildStatus": { @@ -7543,7 +7549,7 @@ "type": { "target": "com.amazonaws.codebuild#SourceAuthType", "traits": { - "smithy.api#documentation": "

The authorization type to use. Valid options are OAUTH or CODECONNECTIONS.

", + "smithy.api#documentation": "

The authorization type to use. Valid options are OAUTH, CODECONNECTIONS, or SECRETS_MANAGER.

", "smithy.api#required": {} } }, @@ -7555,7 +7561,7 @@ } }, "traits": { - "smithy.api#documentation": "

Information about the authorization settings for CodeBuild to access the source code to be\n built.

\n

This information is for the CodeBuild console's use only. Your code should not get or set\n this information directly.

" + "smithy.api#documentation": "

Information about the authorization settings for CodeBuild to access the source code to be\n built.

" } }, "com.amazonaws.codebuild#SourceAuthType": { @@ -7572,6 +7578,12 @@ "traits": { "smithy.api#enumValue": "CODECONNECTIONS" } + }, + "SECRETS_MANAGER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SECRETS_MANAGER" + } } } }, @@ -7593,13 +7605,13 @@ "authType": { "target": "com.amazonaws.codebuild#AuthType", "traits": { - "smithy.api#documentation": "

The type of authentication used by the credentials. Valid options are OAUTH,\n BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.

" + "smithy.api#documentation": "

The type of authentication used by the credentials. Valid options are OAUTH,\n BASIC_AUTH, PERSONAL_ACCESS_TOKEN, CODECONNECTIONS, or SECRETS_MANAGER.

" } }, "resource": { "target": "com.amazonaws.codebuild#String", "traits": { - "smithy.api#documentation": "

The connection ARN if your serverType type is GITLAB or GITLAB_SELF_MANAGED and your authType is CODECONNECTIONS.

" + "smithy.api#documentation": "

The connection ARN if your authType is CODECONNECTIONS or SECRETS_MANAGER.

" } } }, diff --git a/models/codecommit.json b/models/codecommit.json index 8edd4f8bc9..18e1a10841 100644 --- a/models/codecommit.json +++ b/models/codecommit.json @@ -4174,6 +4174,9 @@ { "target": "com.amazonaws.codecommit#InvalidTagsMapException" }, + { + "target": "com.amazonaws.codecommit#OperationNotAllowedException" + }, { "target": "com.amazonaws.codecommit#RepositoryLimitExceededException" }, @@ -4219,7 +4222,7 @@ "kmsKeyId": { "target": "com.amazonaws.codecommit#KmsKeyId", "traits": { - "smithy.api#documentation": "

The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to\n programmatically retrieve a key ID. For more information about acceptable values for kmsKeyID, see \n KeyId in the Decrypt API description in \n the Key Management Service API Reference.

\n

If no key is specified, the default aws/codecommit Amazon Web Services managed key is used.

" + "smithy.api#documentation": "

The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to\n programmatically retrieve a key ID. For more information about acceptable values for kmsKeyID, see \n KeyId in the Decrypt API description in \n the Key Management Service API Reference.

\n

If no key is specified, the default aws/codecommit Amazon Web Services managed key is used.

" } } }, @@ -11528,6 +11531,21 @@ "smithy.api#documentation": "

Information about the type of an object in a merge operation.

" } }, + "com.amazonaws.codecommit#OperationNotAllowedException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.codecommit#Message", + "traits": { + "smithy.api#documentation": "

Any message associated with the exception.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The requested action is not allowed.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.codecommit#OrderEnum": { "type": "enum", "members": { @@ -15511,7 +15529,7 @@ "kmsKeyId": { "target": "com.amazonaws.codecommit#KmsKeyId", "traits": { - "smithy.api#documentation": "

The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to\n programmatically retrieve a key ID. For more information about acceptable values for keyID, see \n KeyId in the Decrypt API description in \n the Key Management Service API Reference.

", + "smithy.api#documentation": "

The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to\n programmatically retrieve a key ID. For more information about acceptable values for keyID, see \n KeyId in the Decrypt API description in \n the Key Management Service API Reference.

", "smithy.api#required": {} } } diff --git a/models/codepipeline.json b/models/codepipeline.json index 777d741a7d..1c660cc5ba 100644 --- a/models/codepipeline.json +++ b/models/codepipeline.json @@ -1679,6 +1679,21 @@ } } }, + "com.amazonaws.codepipeline#BeforeEntryConditions": { + "type": "structure", + "members": { + "conditions": { + "target": "com.amazonaws.codepipeline#ConditionList", + "traits": { + "smithy.api#documentation": "

The conditions that are configured as entry conditions.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The conditions for making checks for entry to a stage.

" + } + }, "com.amazonaws.codepipeline#BlockerDeclaration": { "type": "structure", "members": { @@ -1819,12 +1834,21 @@ { "target": "com.amazonaws.codepipeline#ListPipelines" }, + { + "target": "com.amazonaws.codepipeline#ListRuleExecutions" + }, + { + "target": "com.amazonaws.codepipeline#ListRuleTypes" + }, { "target": "com.amazonaws.codepipeline#ListTagsForResource" }, { "target": "com.amazonaws.codepipeline#ListWebhooks" }, + { + "target": "com.amazonaws.codepipeline#OverrideStageCondition" + }, { "target": "com.amazonaws.codepipeline#PollForJobs" }, @@ -2863,6 +2887,166 @@ "smithy.api#error": "client" } }, + "com.amazonaws.codepipeline#Condition": { + "type": "structure", + "members": { + "result": { + "target": "com.amazonaws.codepipeline#Result", + "traits": { + "smithy.api#documentation": "

The action to be done when the condition is met. For example, rolling back an execution for a failure condition.

" + } + }, + "rules": { + "target": "com.amazonaws.codepipeline#RuleDeclarationList", + "traits": { + "smithy.api#documentation": "

The rules that make up the condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The condition for the stage. A condition is made up of the rules and the result for the condition.

" + } + }, + "com.amazonaws.codepipeline#ConditionExecution": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.codepipeline#ConditionExecutionStatus", + "traits": { + "smithy.api#documentation": "

The status of the run for a condition.

" + } + }, + "summary": { + "target": "com.amazonaws.codepipeline#ExecutionSummary", + "traits": { + "smithy.api#documentation": "

The summary of information about a run for a condition.

" + } + }, + "lastStatusChange": { + "target": "com.amazonaws.codepipeline#Timestamp", + "traits": { + "smithy.api#documentation": "

The last status change of the condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The run of a condition.

" + } + }, + "com.amazonaws.codepipeline#ConditionExecutionStatus": { + "type": "enum", + "members": { + "InProgress": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InProgress" + } + }, + "Failed": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + }, + "Errored": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Errored" + } + }, + "Succeeded": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Succeeded" + } + }, + "Cancelled": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Cancelled" + } + }, + "Abandoned": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Abandoned" + } + }, + "Overridden": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Overridden" + } + } + } + }, + "com.amazonaws.codepipeline#ConditionList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#Condition" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.codepipeline#ConditionNotOverridableException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.codepipeline#String" + } + }, + "traits": { + "smithy.api#documentation": "

Unable to override because the condition does not allow overrides.

", + "smithy.api#error": "client" + } + }, + "com.amazonaws.codepipeline#ConditionState": { + "type": "structure", + "members": { + "latestExecution": { + "target": "com.amazonaws.codepipeline#ConditionExecution", + "traits": { + "smithy.api#documentation": "

The state of the latest run of the rule.

" + } + }, + "ruleStates": { + "target": "com.amazonaws.codepipeline#RuleStateList", + "traits": { + "smithy.api#documentation": "

The state of the rules for the condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the state of the condition.

" + } + }, + "com.amazonaws.codepipeline#ConditionStateList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#ConditionState" + } + }, + "com.amazonaws.codepipeline#ConditionType": { + "type": "enum", + "members": { + "BEFORE_ENTRY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BEFORE_ENTRY" + } + }, + "ON_SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ON_SUCCESS" + } + } + } + }, "com.amazonaws.codepipeline#ConflictException": { "type": "structure", "members": { @@ -3655,6 +3839,12 @@ "traits": { "smithy.api#documentation": "

The specified result for when the failure conditions are met, such as rolling back the\n stage.

" } + }, + "conditions": { + "target": "com.amazonaws.codepipeline#ConditionList", + "traits": { + "smithy.api#documentation": "

The conditions that are configured as failure conditions.

" + } } }, "traits": { @@ -5241,144 +5431,98 @@ "smithy.api#output": {} } }, - "com.amazonaws.codepipeline#ListTagsForResource": { + "com.amazonaws.codepipeline#ListRuleExecutions": { "type": "operation", "input": { - "target": "com.amazonaws.codepipeline#ListTagsForResourceInput" + "target": "com.amazonaws.codepipeline#ListRuleExecutionsInput" }, "output": { - "target": "com.amazonaws.codepipeline#ListTagsForResourceOutput" + "target": "com.amazonaws.codepipeline#ListRuleExecutionsOutput" }, "errors": [ { - "target": "com.amazonaws.codepipeline#InvalidArnException" + "target": "com.amazonaws.codepipeline#InvalidNextTokenException" }, { - "target": "com.amazonaws.codepipeline#InvalidNextTokenException" + "target": "com.amazonaws.codepipeline#PipelineExecutionNotFoundException" }, { - "target": "com.amazonaws.codepipeline#ResourceNotFoundException" + "target": "com.amazonaws.codepipeline#PipelineNotFoundException" }, { "target": "com.amazonaws.codepipeline#ValidationException" } ], "traits": { - "smithy.api#documentation": "

Gets the set of key-value pairs (metadata) that are used to manage the\n resource.

", + "smithy.api#documentation": "

Lists the rule executions that have occurred in a pipeline configured for conditions with rules.

", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", - "items": "tags", + "items": "ruleExecutionDetails", "pageSize": "maxResults" } } }, - "com.amazonaws.codepipeline#ListTagsForResourceInput": { + "com.amazonaws.codepipeline#ListRuleExecutionsInput": { "type": "structure", "members": { - "resourceArn": { - "target": "com.amazonaws.codepipeline#ResourceArn", + "pipelineName": { + "target": "com.amazonaws.codepipeline#PipelineName", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to get tags for.

", + "smithy.api#documentation": "

The name of the pipeline for which you want to get execution summary\n information.

", "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.codepipeline#NextToken", + "filter": { + "target": "com.amazonaws.codepipeline#RuleExecutionFilter", "traits": { - "smithy.api#documentation": "

The token that was returned from the previous API call, which would be used to return\n the next page of the list. The ListTagsforResource call lists all available tags in one\n call and does not use pagination.

" + "smithy.api#documentation": "

Input information used to filter rule execution history.

" } }, "maxResults": { "target": "com.amazonaws.codepipeline#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in a single call.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.codepipeline#ListTagsForResourceOutput": { - "type": "structure", - "members": { - "tags": { - "target": "com.amazonaws.codepipeline#TagList", - "traits": { - "smithy.api#documentation": "

The tags for the resource.

" + "smithy.api#documentation": "

The maximum number of results to return in a single call. To retrieve the remaining\n results, make another call with the returned nextToken value. Pipeline history is\n limited to the most recent 12 months, based on pipeline execution start times. Default\n value is 100.

" } }, "nextToken": { "target": "com.amazonaws.codepipeline#NextToken", "traits": { - "smithy.api#documentation": "

If the amount of returned information is significantly large, an identifier is also\n returned and can be used in a subsequent API call to return the next page of the list.\n The ListTagsforResource call lists all available tags in one call and does not use\n pagination.

" + "smithy.api#documentation": "

The token that was returned from the previous ListRuleExecutions\n call, which can be used to return the next set of rule executions in the\n list.

" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#input": {} } }, - "com.amazonaws.codepipeline#ListWebhookItem": { + "com.amazonaws.codepipeline#ListRuleExecutionsOutput": { "type": "structure", "members": { - "definition": { - "target": "com.amazonaws.codepipeline#WebhookDefinition", - "traits": { - "smithy.api#documentation": "

The detail returned for each webhook, such as the webhook authentication type and\n filter rules.

", - "smithy.api#required": {} - } - }, - "url": { - "target": "com.amazonaws.codepipeline#WebhookUrl", - "traits": { - "smithy.api#documentation": "

A unique URL generated by CodePipeline. When a POST request is made to this\n URL, the defined pipeline is started as long as the body of the post request satisfies\n the defined authentication and filtering conditions. Deleting and re-creating a webhook\n makes the old URL invalid and generates a new one.

", - "smithy.api#required": {} - } - }, - "errorMessage": { - "target": "com.amazonaws.codepipeline#WebhookErrorMessage", - "traits": { - "smithy.api#documentation": "

The text of the error message about the webhook.

" - } - }, - "errorCode": { - "target": "com.amazonaws.codepipeline#WebhookErrorCode", + "ruleExecutionDetails": { + "target": "com.amazonaws.codepipeline#RuleExecutionDetailList", "traits": { - "smithy.api#documentation": "

The number code of the error.

" - } - }, - "lastTriggered": { - "target": "com.amazonaws.codepipeline#WebhookLastTriggered", - "traits": { - "smithy.api#documentation": "

The date and time a webhook was last successfully triggered, in timestamp\n format.

" - } - }, - "arn": { - "target": "com.amazonaws.codepipeline#WebhookArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the webhook.

" + "smithy.api#documentation": "

Details about the output for listing rule executions.

" } }, - "tags": { - "target": "com.amazonaws.codepipeline#TagList", + "nextToken": { + "target": "com.amazonaws.codepipeline#NextToken", "traits": { - "smithy.api#documentation": "

Specifies the tags applied to the webhook.

" + "smithy.api#documentation": "

A token that can be used in the next ListRuleExecutions call. To\n view all items in the list, continue to call this operation with each subsequent token\n until no more nextToken values are returned.

" } } }, "traits": { - "smithy.api#documentation": "

The detail returned for each webhook after listing webhooks, such as the webhook\n URL, the webhook name, and the webhook ARN.

" + "smithy.api#output": {} } }, - "com.amazonaws.codepipeline#ListWebhooks": { + "com.amazonaws.codepipeline#ListRuleTypes": { "type": "operation", "input": { - "target": "com.amazonaws.codepipeline#ListWebhooksInput" + "target": "com.amazonaws.codepipeline#ListRuleTypesInput" }, "output": { - "target": "com.amazonaws.codepipeline#ListWebhooksOutput" + "target": "com.amazonaws.codepipeline#ListRuleTypesOutput" }, "errors": [ { @@ -5389,28 +5533,22 @@ } ], "traits": { - "smithy.api#documentation": "

Gets a listing of all the webhooks in this Amazon Web Services Region for this\n account. The output lists all webhooks and includes the webhook URL and ARN and the\n configuration for each webhook.

", - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "items": "webhooks", - "pageSize": "MaxResults" - } + "smithy.api#documentation": "

Lists the rules for the condition.

" } }, - "com.amazonaws.codepipeline#ListWebhooksInput": { + "com.amazonaws.codepipeline#ListRuleTypesInput": { "type": "structure", "members": { - "NextToken": { - "target": "com.amazonaws.codepipeline#NextToken", + "ruleOwnerFilter": { + "target": "com.amazonaws.codepipeline#RuleOwner", "traits": { - "smithy.api#documentation": "

The token that was returned from the previous ListWebhooks call, which can be used\n to return the next set of webhooks in the list.

" + "smithy.api#documentation": "

The rule owner to filter on.

" } }, - "MaxResults": { - "target": "com.amazonaws.codepipeline#MaxResults", + "regionFilter": { + "target": "com.amazonaws.codepipeline#AWSRegionName", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in a single call. To retrieve the remaining\n results, make another call with the returned nextToken value.

" + "smithy.api#documentation": "

The rule Region to filter on.

" } } }, @@ -5418,18 +5556,210 @@ "smithy.api#input": {} } }, - "com.amazonaws.codepipeline#ListWebhooksOutput": { + "com.amazonaws.codepipeline#ListRuleTypesOutput": { "type": "structure", "members": { - "webhooks": { - "target": "com.amazonaws.codepipeline#WebhookList", + "ruleTypes": { + "target": "com.amazonaws.codepipeline#RuleTypeList", "traits": { - "smithy.api#documentation": "

The JSON detail returned for each webhook in the list output for the ListWebhooks\n call.

" + "smithy.api#documentation": "

Lists the rules that are configured for the condition.

", + "smithy.api#required": {} } - }, - "NextToken": { - "target": "com.amazonaws.codepipeline#NextToken", - "traits": { + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.codepipeline#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.codepipeline#ListTagsForResourceInput" + }, + "output": { + "target": "com.amazonaws.codepipeline#ListTagsForResourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.codepipeline#InvalidArnException" + }, + { + "target": "com.amazonaws.codepipeline#InvalidNextTokenException" + }, + { + "target": "com.amazonaws.codepipeline#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.codepipeline#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets the set of key-value pairs (metadata) that are used to manage the\n resource.

", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "tags", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.codepipeline#ListTagsForResourceInput": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.codepipeline#ResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to get tags for.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.codepipeline#NextToken", + "traits": { + "smithy.api#documentation": "

The token that was returned from the previous API call, which would be used to return\n the next page of the list. The ListTagsforResource call lists all available tags in one\n call and does not use pagination.

" + } + }, + "maxResults": { + "target": "com.amazonaws.codepipeline#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in a single call.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.codepipeline#ListTagsForResourceOutput": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.codepipeline#TagList", + "traits": { + "smithy.api#documentation": "

The tags for the resource.

" + } + }, + "nextToken": { + "target": "com.amazonaws.codepipeline#NextToken", + "traits": { + "smithy.api#documentation": "

If the amount of returned information is significantly large, an identifier is also\n returned and can be used in a subsequent API call to return the next page of the list.\n The ListTagsforResource call lists all available tags in one call and does not use\n pagination.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.codepipeline#ListWebhookItem": { + "type": "structure", + "members": { + "definition": { + "target": "com.amazonaws.codepipeline#WebhookDefinition", + "traits": { + "smithy.api#documentation": "

The detail returned for each webhook, such as the webhook authentication type and\n filter rules.

", + "smithy.api#required": {} + } + }, + "url": { + "target": "com.amazonaws.codepipeline#WebhookUrl", + "traits": { + "smithy.api#documentation": "

A unique URL generated by CodePipeline. When a POST request is made to this\n URL, the defined pipeline is started as long as the body of the post request satisfies\n the defined authentication and filtering conditions. Deleting and re-creating a webhook\n makes the old URL invalid and generates a new one.

", + "smithy.api#required": {} + } + }, + "errorMessage": { + "target": "com.amazonaws.codepipeline#WebhookErrorMessage", + "traits": { + "smithy.api#documentation": "

The text of the error message about the webhook.

" + } + }, + "errorCode": { + "target": "com.amazonaws.codepipeline#WebhookErrorCode", + "traits": { + "smithy.api#documentation": "

The number code of the error.

" + } + }, + "lastTriggered": { + "target": "com.amazonaws.codepipeline#WebhookLastTriggered", + "traits": { + "smithy.api#documentation": "

The date and time a webhook was last successfully triggered, in timestamp\n format.

" + } + }, + "arn": { + "target": "com.amazonaws.codepipeline#WebhookArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the webhook.

" + } + }, + "tags": { + "target": "com.amazonaws.codepipeline#TagList", + "traits": { + "smithy.api#documentation": "

Specifies the tags applied to the webhook.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The detail returned for each webhook after listing webhooks, such as the webhook\n URL, the webhook name, and the webhook ARN.

" + } + }, + "com.amazonaws.codepipeline#ListWebhooks": { + "type": "operation", + "input": { + "target": "com.amazonaws.codepipeline#ListWebhooksInput" + }, + "output": { + "target": "com.amazonaws.codepipeline#ListWebhooksOutput" + }, + "errors": [ + { + "target": "com.amazonaws.codepipeline#InvalidNextTokenException" + }, + { + "target": "com.amazonaws.codepipeline#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets a listing of all the webhooks in this Amazon Web Services Region for this\n account. The output lists all webhooks and includes the webhook URL and ARN and the\n configuration for each webhook.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "webhooks", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.codepipeline#ListWebhooksInput": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.codepipeline#NextToken", + "traits": { + "smithy.api#documentation": "

The token that was returned from the previous ListWebhooks call, which can be used\n to return the next set of webhooks in the list.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.codepipeline#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in a single call. To retrieve the remaining\n results, make another call with the returned nextToken value.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.codepipeline#ListWebhooksOutput": { + "type": "structure", + "members": { + "webhooks": { + "target": "com.amazonaws.codepipeline#WebhookList", + "traits": { + "smithy.api#documentation": "

The JSON detail returned for each webhook in the list output for the ListWebhooks\n call.

" + } + }, + "NextToken": { + "target": "com.amazonaws.codepipeline#NextToken", + "traits": { "smithy.api#documentation": "

If the amount of returned information is significantly large, an identifier is also\n returned and can be used in a subsequent ListWebhooks call to return the next set of\n webhooks in the list.

" } } @@ -5606,6 +5936,77 @@ "com.amazonaws.codepipeline#OutputVariablesValue": { "type": "string" }, + "com.amazonaws.codepipeline#OverrideStageCondition": { + "type": "operation", + "input": { + "target": "com.amazonaws.codepipeline#OverrideStageConditionInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.codepipeline#ConcurrentPipelineExecutionsLimitExceededException" + }, + { + "target": "com.amazonaws.codepipeline#ConditionNotOverridableException" + }, + { + "target": "com.amazonaws.codepipeline#ConflictException" + }, + { + "target": "com.amazonaws.codepipeline#NotLatestPipelineExecutionException" + }, + { + "target": "com.amazonaws.codepipeline#PipelineNotFoundException" + }, + { + "target": "com.amazonaws.codepipeline#StageNotFoundException" + }, + { + "target": "com.amazonaws.codepipeline#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Used to override a stage condition.

" + } + }, + "com.amazonaws.codepipeline#OverrideStageConditionInput": { + "type": "structure", + "members": { + "pipelineName": { + "target": "com.amazonaws.codepipeline#PipelineName", + "traits": { + "smithy.api#documentation": "

The name of the pipeline with the stage that will override the condition.

", + "smithy.api#required": {} + } + }, + "stageName": { + "target": "com.amazonaws.codepipeline#StageName", + "traits": { + "smithy.api#documentation": "

The name of the stage for the override.

", + "smithy.api#required": {} + } + }, + "pipelineExecutionId": { + "target": "com.amazonaws.codepipeline#PipelineExecutionId", + "traits": { + "smithy.api#documentation": "

The ID of the pipeline execution for the override.

", + "smithy.api#required": {} + } + }, + "conditionType": { + "target": "com.amazonaws.codepipeline#ConditionType", + "traits": { + "smithy.api#documentation": "

The type of condition to override for the stage, such as entry conditions, failure conditions, or success conditions.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.codepipeline#Percentage": { "type": "integer", "traits": { @@ -6495,7 +6896,10 @@ "target": "com.amazonaws.codepipeline#ActionNotFoundException" }, { - "target": "com.amazonaws.codepipeline#PipelineNotFoundException" + "target": "com.amazonaws.codepipeline#ConcurrentPipelineExecutionsLimitExceededException" + }, + { + "target": "com.amazonaws.codepipeline#PipelineNotFoundException" }, { "target": "com.amazonaws.codepipeline#StageNotFoundException" @@ -7069,6 +7473,15 @@ "target": "com.amazonaws.codepipeline#ResolvedPipelineVariable" } }, + "com.amazonaws.codepipeline#ResolvedRuleConfigurationMap": { + "type": "map", + "key": { + "target": "com.amazonaws.codepipeline#String" + }, + "value": { + "target": "com.amazonaws.codepipeline#String" + } + }, "com.amazonaws.codepipeline#ResourceArn": { "type": "string", "traits": { @@ -7098,6 +7511,12 @@ "traits": { "smithy.api#enumValue": "ROLLBACK" } + }, + "FAIL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAIL" + } } } }, @@ -7110,6 +7529,9 @@ "target": "com.amazonaws.codepipeline#RetryStageExecutionOutput" }, "errors": [ + { + "target": "com.amazonaws.codepipeline#ConcurrentPipelineExecutionsLimitExceededException" + }, { "target": "com.amazonaws.codepipeline#ConflictException" }, @@ -7301,291 +7723,1053 @@ "smithy.api#output": {} } }, - "com.amazonaws.codepipeline#S3ArtifactLocation": { - "type": "structure", + "com.amazonaws.codepipeline#RuleCategory": { + "type": "enum", "members": { - "bucketName": { - "target": "com.amazonaws.codepipeline#S3BucketName", - "traits": { - "smithy.api#documentation": "

The name of the S3 bucket.

", - "smithy.api#required": {} - } - }, - "objectKey": { - "target": "com.amazonaws.codepipeline#S3ObjectKey", + "Rule": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The key of the object in the S3 bucket, which uniquely identifies the object in the\n bucket.

", - "smithy.api#required": {} + "smithy.api#enumValue": "Rule" } } - }, - "traits": { - "smithy.api#documentation": "

The location of the S3 bucket that contains a revision.

" - } - }, - "com.amazonaws.codepipeline#S3Bucket": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 3, - "max": 63 - } } }, - "com.amazonaws.codepipeline#S3BucketName": { - "type": "string" - }, - "com.amazonaws.codepipeline#S3Key": { + "com.amazonaws.codepipeline#RuleConfigurationKey": { "type": "string", "traits": { "smithy.api#length": { "min": 1, - "max": 100 + "max": 50 } } }, - "com.amazonaws.codepipeline#S3Location": { - "type": "structure", - "members": { - "bucket": { - "target": "com.amazonaws.codepipeline#S3Bucket", - "traits": { - "smithy.api#documentation": "

The Amazon S3 artifact bucket for an action's artifacts.

" - } - }, - "key": { - "target": "com.amazonaws.codepipeline#S3Key", - "traits": { - "smithy.api#documentation": "

The artifact name.

" - } - } + "com.amazonaws.codepipeline#RuleConfigurationMap": { + "type": "map", + "key": { + "target": "com.amazonaws.codepipeline#RuleConfigurationKey" + }, + "value": { + "target": "com.amazonaws.codepipeline#RuleConfigurationValue" }, - "traits": { - "smithy.api#documentation": "

The Amazon S3 artifact location for an action's artifacts.

" - } - }, - "com.amazonaws.codepipeline#S3ObjectKey": { - "type": "string" - }, - "com.amazonaws.codepipeline#SecretAccessKey": { - "type": "string", - "traits": { - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.codepipeline#ServicePrincipal": { - "type": "string", "traits": { "smithy.api#length": { - "min": 1, - "max": 128 + "min": 0, + "max": 200 } } }, - "com.amazonaws.codepipeline#SessionToken": { - "type": "string", - "traits": { - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.codepipeline#SourceRevision": { + "com.amazonaws.codepipeline#RuleConfigurationProperty": { "type": "structure", "members": { - "actionName": { - "target": "com.amazonaws.codepipeline#ActionName", + "name": { + "target": "com.amazonaws.codepipeline#RuleConfigurationKey", "traits": { - "smithy.api#documentation": "

The name of the action that processed the revision to the source\n artifact.

", + "smithy.api#documentation": "

The name of the rule configuration property.

", "smithy.api#required": {} } }, - "revisionId": { - "target": "com.amazonaws.codepipeline#Revision", + "required": { + "target": "com.amazonaws.codepipeline#Boolean", "traits": { - "smithy.api#documentation": "

The system-generated unique ID that identifies the revision number of the\n artifact.

" + "smithy.api#default": false, + "smithy.api#documentation": "

Whether the configuration property is a required value.

", + "smithy.api#required": {} } }, - "revisionSummary": { - "target": "com.amazonaws.codepipeline#RevisionSummary", + "key": { + "target": "com.amazonaws.codepipeline#Boolean", "traits": { - "smithy.api#documentation": "

Summary information about the most recent revision of the artifact. For GitHub and\n CodeCommit repositories, the commit message. For Amazon S3 buckets or actions,\n the user-provided content of a codepipeline-artifact-revision-summary key\n specified in the object metadata.

" + "smithy.api#default": false, + "smithy.api#documentation": "

Whether the configuration property is a key.

", + "smithy.api#required": {} } }, - "revisionUrl": { - "target": "com.amazonaws.codepipeline#Url", + "secret": { + "target": "com.amazonaws.codepipeline#Boolean", "traits": { - "smithy.api#documentation": "

The commit ID for the artifact revision. For artifacts stored in GitHub or\n CodeCommit repositories, the commit ID is linked to a commit details page.

" + "smithy.api#default": false, + "smithy.api#documentation": "

Whether the configuration property is secret.

\n

When updating a pipeline, passing * * * * * without changing any other values of\n the action preserves the previous value of the secret.

", + "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#documentation": "

Information about the version (or revision) of a source artifact that initiated a\n pipeline execution.

" - } - }, - "com.amazonaws.codepipeline#SourceRevisionList": { - "type": "list", - "member": { - "target": "com.amazonaws.codepipeline#SourceRevision" - } - }, - "com.amazonaws.codepipeline#SourceRevisionOverride": { - "type": "structure", - "members": { - "actionName": { - "target": "com.amazonaws.codepipeline#ActionName", + }, + "queryable": { + "target": "com.amazonaws.codepipeline#Boolean", "traits": { - "smithy.api#documentation": "

The name of the action where the override will be applied.

", - "smithy.api#required": {} + "smithy.api#default": false, + "smithy.api#documentation": "

Indicates whether the property can be queried.

\n

If you create a pipeline with a condition and rule, and that rule contains a queryable property, the value for that configuration property is subject to other\n restrictions. The value must be less than or equal to twenty (20) characters. The value\n can contain only alphanumeric characters, underscores, and hyphens.

" } }, - "revisionType": { - "target": "com.amazonaws.codepipeline#SourceRevisionType", + "description": { + "target": "com.amazonaws.codepipeline#Description", "traits": { - "smithy.api#documentation": "

The type of source revision, based on the source provider. For example, the revision\n type for the CodeCommit action provider is the commit ID.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The description of the action configuration property that is displayed to\n users.

" } }, - "revisionValue": { - "target": "com.amazonaws.codepipeline#Revision", + "type": { + "target": "com.amazonaws.codepipeline#RuleConfigurationPropertyType", "traits": { - "smithy.api#documentation": "

The source revision, or version of your source artifact, with the changes that you\n want to run in the pipeline execution.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The type of the configuration property.

" } } }, "traits": { - "smithy.api#documentation": "

A list that allows you to specify, or override, the source revision for a pipeline\n execution that's being started. A source revision is the version with all the changes to\n your application code, or source artifact, for the pipeline execution.

\n \n

For the S3_OBJECT_VERSION_ID and S3_OBJECT_KEY types of source revisions, either\n of the types can be used independently, or they can be used together to override the\n source with a specific ObjectKey and VersionID.

\n
" + "smithy.api#documentation": "

Represents information about a rule configuration property.

" } }, - "com.amazonaws.codepipeline#SourceRevisionOverrideList": { + "com.amazonaws.codepipeline#RuleConfigurationPropertyList": { "type": "list", "member": { - "target": "com.amazonaws.codepipeline#SourceRevisionOverride" + "target": "com.amazonaws.codepipeline#RuleConfigurationProperty" }, "traits": { "smithy.api#length": { "min": 0, - "max": 50 + "max": 10 } } }, - "com.amazonaws.codepipeline#SourceRevisionType": { + "com.amazonaws.codepipeline#RuleConfigurationPropertyType": { "type": "enum", "members": { - "COMMIT_ID": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "COMMIT_ID" - } - }, - "IMAGE_DIGEST": { + "String": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "IMAGE_DIGEST" + "smithy.api#enumValue": "String" } }, - "S3_OBJECT_VERSION_ID": { + "Number": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "S3_OBJECT_VERSION_ID" + "smithy.api#enumValue": "Number" } }, - "S3_OBJECT_KEY": { + "Boolean": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "S3_OBJECT_KEY" + "smithy.api#enumValue": "Boolean" } } } }, - "com.amazonaws.codepipeline#StageActionDeclarationList": { - "type": "list", - "member": { - "target": "com.amazonaws.codepipeline#ActionDeclaration" - } - }, - "com.amazonaws.codepipeline#StageBlockerDeclarationList": { - "type": "list", - "member": { - "target": "com.amazonaws.codepipeline#BlockerDeclaration" - } - }, - "com.amazonaws.codepipeline#StageContext": { - "type": "structure", - "members": { - "name": { - "target": "com.amazonaws.codepipeline#StageName", - "traits": { - "smithy.api#documentation": "

The name of the stage.

" - } - } - }, + "com.amazonaws.codepipeline#RuleConfigurationValue": { + "type": "string", "traits": { - "smithy.api#documentation": "

Represents information about a stage to a job worker.

" + "smithy.api#length": { + "min": 1, + "max": 10000 + } } }, - "com.amazonaws.codepipeline#StageDeclaration": { + "com.amazonaws.codepipeline#RuleDeclaration": { "type": "structure", "members": { "name": { - "target": "com.amazonaws.codepipeline#StageName", + "target": "com.amazonaws.codepipeline#RuleName", "traits": { - "smithy.api#documentation": "

The name of the stage.

", + "smithy.api#documentation": "

The name of the rule that is created for the condition, such as CheckAllResults.

", "smithy.api#required": {} } }, - "blockers": { - "target": "com.amazonaws.codepipeline#StageBlockerDeclarationList", + "ruleTypeId": { + "target": "com.amazonaws.codepipeline#RuleTypeId", "traits": { - "smithy.api#documentation": "

Reserved for future use.

" + "smithy.api#documentation": "

The ID for the rule type, which is made up of the combined values for category, owner, provider, and version.

", + "smithy.api#required": {} } }, - "actions": { - "target": "com.amazonaws.codepipeline#StageActionDeclarationList", + "configuration": { + "target": "com.amazonaws.codepipeline#RuleConfigurationMap", "traits": { - "smithy.api#documentation": "

The actions included in a stage.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The action configuration fields for the rule.

" } }, - "onFailure": { - "target": "com.amazonaws.codepipeline#FailureConditions", + "inputArtifacts": { + "target": "com.amazonaws.codepipeline#InputArtifactList", "traits": { - "smithy.api#documentation": "

The method to use when a stage has not completed successfully. For example,\n configuring this field for rollback will roll back a failed stage automatically to the\n last successful pipeline execution in the stage.

" + "smithy.api#documentation": "

The input artifacts fields for the rule, such as specifying an input file for the rule.

" } - } - }, - "traits": { - "smithy.api#documentation": "

Represents information about a stage and its definition.

" - } - }, - "com.amazonaws.codepipeline#StageExecution": { - "type": "structure", - "members": { - "pipelineExecutionId": { - "target": "com.amazonaws.codepipeline#PipelineExecutionId", + }, + "roleArn": { + "target": "com.amazonaws.codepipeline#RoleArn", "traits": { - "smithy.api#documentation": "

The ID of the pipeline execution associated with the stage.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The pipeline role ARN associated with the rule.

" } }, - "status": { - "target": "com.amazonaws.codepipeline#StageExecutionStatus", + "region": { + "target": "com.amazonaws.codepipeline#AWSRegionName", "traits": { - "smithy.api#documentation": "

The status of the stage, or for a completed stage, the last status of the\n stage.

\n \n

A status of cancelled means that the pipeline’s definition was updated before the\n stage execution could be completed.

\n
", - "smithy.api#required": {} + "smithy.api#documentation": "

The Region for the condition associated with the rule.

" } }, - "type": { - "target": "com.amazonaws.codepipeline#ExecutionType", + "timeoutInMinutes": { + "target": "com.amazonaws.codepipeline#RuleTimeout", "traits": { - "smithy.api#documentation": "

The type of pipeline execution for the stage, such as a rollback pipeline\n execution.

" + "smithy.api#documentation": "

The action timeout for the rule.

" } } }, "traits": { - "smithy.api#documentation": "

Represents information about the run of a stage.

" + "smithy.api#documentation": "

Represents information about the rule to be created for an associated condition. An example would be creating a new rule for an entry condition, such as a rule that checks for a test result before allowing the run to enter the deployment stage.

" } }, - "com.amazonaws.codepipeline#StageExecutionList": { + "com.amazonaws.codepipeline#RuleDeclarationList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#RuleDeclaration" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, + "com.amazonaws.codepipeline#RuleExecution": { + "type": "structure", + "members": { + "ruleExecutionId": { + "target": "com.amazonaws.codepipeline#RuleExecutionId", + "traits": { + "smithy.api#documentation": "

The execution ID for the run of the rule.

" + } + }, + "status": { + "target": "com.amazonaws.codepipeline#RuleExecutionStatus", + "traits": { + "smithy.api#documentation": "

The status of the run of the rule, such as FAILED.

" + } + }, + "summary": { + "target": "com.amazonaws.codepipeline#ExecutionSummary", + "traits": { + "smithy.api#documentation": "

A summary of the run of the rule.

" + } + }, + "lastStatusChange": { + "target": "com.amazonaws.codepipeline#Timestamp", + "traits": { + "smithy.api#documentation": "

The last status change of the rule.

" + } + }, + "token": { + "target": "com.amazonaws.codepipeline#RuleExecutionToken", + "traits": { + "smithy.api#documentation": "

The system-generated token used to identify a unique request.

" + } + }, + "lastUpdatedBy": { + "target": "com.amazonaws.codepipeline#LastUpdatedBy", + "traits": { + "smithy.api#documentation": "

The ARN of the user who last changed the rule.

" + } + }, + "externalExecutionId": { + "target": "com.amazonaws.codepipeline#ExecutionId", + "traits": { + "smithy.api#documentation": "

The external ID of the run of the rule.

" + } + }, + "externalExecutionUrl": { + "target": "com.amazonaws.codepipeline#Url", + "traits": { + "smithy.api#documentation": "

The URL of a resource external to Amazon Web Services that is used when running the\n rule (for example, an external repository URL).

" + } + }, + "errorDetails": { + "target": "com.amazonaws.codepipeline#ErrorDetails" + } + }, + "traits": { + "smithy.api#documentation": "

Represents information about each time a rule is run as part of the pipeline execution for a pipeline configured with conditions.

" + } + }, + "com.amazonaws.codepipeline#RuleExecutionDetail": { + "type": "structure", + "members": { + "pipelineExecutionId": { + "target": "com.amazonaws.codepipeline#PipelineExecutionId", + "traits": { + "smithy.api#documentation": "

The ID of the pipeline execution in the stage where the rule was run. Use the GetPipelineState action to retrieve the current pipelineExecutionId of\n the stage.

" + } + }, + "ruleExecutionId": { + "target": "com.amazonaws.codepipeline#RuleExecutionId", + "traits": { + "smithy.api#documentation": "

The ID of the run for the rule.

" + } + }, + "pipelineVersion": { + "target": "com.amazonaws.codepipeline#PipelineVersion", + "traits": { + "smithy.api#documentation": "

The version number of the pipeline with the stage where the rule was run.

" + } + }, + "stageName": { + "target": "com.amazonaws.codepipeline#StageName", + "traits": { + "smithy.api#documentation": "

The name of the stage where the rule was run.

" + } + }, + "ruleName": { + "target": "com.amazonaws.codepipeline#RuleName", + "traits": { + "smithy.api#documentation": "

The name of the rule that was run in the stage.

" + } + }, + "startTime": { + "target": "com.amazonaws.codepipeline#Timestamp", + "traits": { + "smithy.api#documentation": "

The start time of the rule execution.

" + } + }, + "lastUpdateTime": { + "target": "com.amazonaws.codepipeline#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time of the last change to the rule execution, in timestamp\n format.

" + } + }, + "updatedBy": { + "target": "com.amazonaws.codepipeline#LastUpdatedBy", + "traits": { + "smithy.api#documentation": "

The ARN of the user who changed the rule execution details.

" + } + }, + "status": { + "target": "com.amazonaws.codepipeline#RuleExecutionStatus", + "traits": { + "smithy.api#documentation": "

The status of the rule execution. Status categories are InProgress,\n Succeeded, and Failed.\n

" + } + }, + "input": { + "target": "com.amazonaws.codepipeline#RuleExecutionInput", + "traits": { + "smithy.api#documentation": "

Input details for the rule execution, such as role ARN, Region, and input\n artifacts.

" + } + }, + "output": { + "target": "com.amazonaws.codepipeline#RuleExecutionOutput", + "traits": { + "smithy.api#documentation": "

Output details for the rule execution, such as the rule execution result.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the runs for a rule and the results produced on an artifact as it passes\n through stages in the pipeline.

" + } + }, + "com.amazonaws.codepipeline#RuleExecutionDetailList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#RuleExecutionDetail" + } + }, + "com.amazonaws.codepipeline#RuleExecutionFilter": { + "type": "structure", + "members": { + "pipelineExecutionId": { + "target": "com.amazonaws.codepipeline#PipelineExecutionId", + "traits": { + "smithy.api#documentation": "

The pipeline execution ID used to filter rule execution history.

" + } + }, + "latestInPipelineExecution": { + "target": "com.amazonaws.codepipeline#LatestInPipelineExecutionFilter" + } + }, + "traits": { + "smithy.api#documentation": "

Filter values for the rule execution.

" + } + }, + "com.amazonaws.codepipeline#RuleExecutionId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + } + } + }, + "com.amazonaws.codepipeline#RuleExecutionInput": { + "type": "structure", + "members": { + "ruleTypeId": { + "target": "com.amazonaws.codepipeline#RuleTypeId", + "traits": { + "smithy.api#documentation": "

The ID for the rule type, which is made up of the combined values for category, owner, provider, and version.

" + } + }, + "configuration": { + "target": "com.amazonaws.codepipeline#RuleConfigurationMap", + "traits": { + "smithy.api#documentation": "

Configuration data for a rule execution, such as the resolved values for that run.

" + } + }, + "resolvedConfiguration": { + "target": "com.amazonaws.codepipeline#ResolvedRuleConfigurationMap", + "traits": { + "smithy.api#documentation": "

Configuration data for a rule execution with all variable references replaced with\n their real values for the execution.

" + } + }, + "roleArn": { + "target": "com.amazonaws.codepipeline#RoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the IAM service role that performs the declared rule. This is assumed\n through the roleArn for the pipeline.

" + } + }, + "region": { + "target": "com.amazonaws.codepipeline#AWSRegionName", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Region for the rule, such as us-east-1.

" + } + }, + "inputArtifacts": { + "target": "com.amazonaws.codepipeline#ArtifactDetailList", + "traits": { + "smithy.api#documentation": "

Details of input artifacts of the rule that correspond to the rule \n execution.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Input information used for a rule execution.

" + } + }, + "com.amazonaws.codepipeline#RuleExecutionOutput": { + "type": "structure", + "members": { + "executionResult": { + "target": "com.amazonaws.codepipeline#RuleExecutionResult", + "traits": { + "smithy.api#documentation": "

Execution result information listed in the output details for a rule\n execution.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Output details listed for a rule execution, such as the rule execution\n result.

" + } + }, + "com.amazonaws.codepipeline#RuleExecutionResult": { + "type": "structure", + "members": { + "externalExecutionId": { + "target": "com.amazonaws.codepipeline#ExternalExecutionId", + "traits": { + "smithy.api#documentation": "

The external ID for the rule execution.

" + } + }, + "externalExecutionSummary": { + "target": "com.amazonaws.codepipeline#ExternalExecutionSummary", + "traits": { + "smithy.api#documentation": "

The external provider summary for the rule execution.

" + } + }, + "externalExecutionUrl": { + "target": "com.amazonaws.codepipeline#Url", + "traits": { + "smithy.api#documentation": "

The deepest external link to the external resource (for example, a repository URL or\n deployment endpoint) that is used when running the rule.

" + } + }, + "errorDetails": { + "target": "com.amazonaws.codepipeline#ErrorDetails" + } + }, + "traits": { + "smithy.api#documentation": "

Execution result information, such as the external execution ID.

" + } + }, + "com.amazonaws.codepipeline#RuleExecutionStatus": { + "type": "enum", + "members": { + "InProgress": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InProgress" + } + }, + "Abandoned": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Abandoned" + } + }, + "Succeeded": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Succeeded" + } + }, + "Failed": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + } + } + }, + "com.amazonaws.codepipeline#RuleExecutionToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\-\\.]+$" + } + }, + "com.amazonaws.codepipeline#RuleName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[A-Za-z0-9.@\\-_]+$" + } + }, + "com.amazonaws.codepipeline#RuleOwner": { + "type": "enum", + "members": { + "AWS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS" + } + } + } + }, + "com.amazonaws.codepipeline#RuleProvider": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 35 + }, + "smithy.api#pattern": "^[0-9A-Za-z_-]+$" + } + }, + "com.amazonaws.codepipeline#RuleRevision": { + "type": "structure", + "members": { + "revisionId": { + "target": "com.amazonaws.codepipeline#Revision", + "traits": { + "smithy.api#documentation": "

The system-generated unique ID that identifies the revision number of the\n rule.

", + "smithy.api#required": {} + } + }, + "revisionChangeId": { + "target": "com.amazonaws.codepipeline#RevisionChangeIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the change that set the state to this revision (for\n example, a deployment ID or timestamp).

", + "smithy.api#required": {} + } + }, + "created": { + "target": "com.amazonaws.codepipeline#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time when the most recent version of the rule was created, in\n timestamp format.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The change to a rule that creates a revision of the rule.

" + } + }, + "com.amazonaws.codepipeline#RuleState": { + "type": "structure", + "members": { + "ruleName": { + "target": "com.amazonaws.codepipeline#RuleName", + "traits": { + "smithy.api#documentation": "

The name of the rule.

" + } + }, + "currentRevision": { + "target": "com.amazonaws.codepipeline#RuleRevision", + "traits": { + "smithy.api#documentation": "

The ID of the current revision of the artifact successfully worked on by the\n job.

" + } + }, + "latestExecution": { + "target": "com.amazonaws.codepipeline#RuleExecution", + "traits": { + "smithy.api#documentation": "

 Represents information about the latest run of a rule.

" + } + }, + "entityUrl": { + "target": "com.amazonaws.codepipeline#Url", + "traits": { + "smithy.api#documentation": "

A URL link for more information about the state of the action, such as a details page.

" + } + }, + "revisionUrl": { + "target": "com.amazonaws.codepipeline#Url", + "traits": { + "smithy.api#documentation": "

A URL link for more information about the revision, such as a commit details\n page.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Returns information about the state of a rule.

\n \n

Values returned in the revisionId field indicate the rule revision information, such as the commit ID, for the current state.

\n
" + } + }, + "com.amazonaws.codepipeline#RuleStateList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#RuleState" + } + }, + "com.amazonaws.codepipeline#RuleTimeout": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 5, + "max": 86400 + } + } + }, + "com.amazonaws.codepipeline#RuleType": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.codepipeline#RuleTypeId", + "traits": { + "smithy.api#documentation": "

Represents information about a rule type.

", + "smithy.api#required": {} + } + }, + "settings": { + "target": "com.amazonaws.codepipeline#RuleTypeSettings", + "traits": { + "smithy.api#documentation": "

Returns information about the settings for a rule type.

" + } + }, + "ruleConfigurationProperties": { + "target": "com.amazonaws.codepipeline#RuleConfigurationPropertyList", + "traits": { + "smithy.api#documentation": "

The configuration properties for the rule type.

" + } + }, + "inputArtifactDetails": { + "target": "com.amazonaws.codepipeline#ArtifactDetails", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The rule type, which is made up of the combined values for category, owner, provider, and version.

" + } + }, + "com.amazonaws.codepipeline#RuleTypeId": { + "type": "structure", + "members": { + "category": { + "target": "com.amazonaws.codepipeline#RuleCategory", + "traits": { + "smithy.api#documentation": "

A category defines what kind of rule can be run in the stage, and constrains\n the provider type for the rule. Valid categories are limited to one of the following\n values.

\n
    \n
  • \n

    INVOKE

    \n
  • \n
  • \n

    Approval

    \n
  • \n
  • \n

    Rule

    \n
  • \n
", + "smithy.api#required": {} + } + }, + "owner": { + "target": "com.amazonaws.codepipeline#RuleOwner", + "traits": { + "smithy.api#documentation": "

The creator of the rule being called. The valid value for the\n Owner field in the rule category is AWS.

" + } + }, + "provider": { + "target": "com.amazonaws.codepipeline#RuleProvider", + "traits": { + "smithy.api#documentation": "

 The provider of the service being called by the rule. Valid providers are\n determined by the rule category. For example, a managed rule in the Rule category type\n has an owner of AWS, which would be specified as\n AWS.

", + "smithy.api#required": {} + } + }, + "version": { + "target": "com.amazonaws.codepipeline#Version", + "traits": { + "smithy.api#documentation": "

A string that describes the rule version.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The ID for the rule type, which is made up of the combined values for category, owner, provider, and version.

" + } + }, + "com.amazonaws.codepipeline#RuleTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#RuleType" + } + }, + "com.amazonaws.codepipeline#RuleTypeSettings": { + "type": "structure", + "members": { + "thirdPartyConfigurationUrl": { + "target": "com.amazonaws.codepipeline#Url", + "traits": { + "smithy.api#documentation": "

The URL of a sign-up page where users can sign up for an external service and\n perform initial configuration of the action provided by that service.

" + } + }, + "entityUrlTemplate": { + "target": "com.amazonaws.codepipeline#UrlTemplate", + "traits": { + "smithy.api#documentation": "

The URL returned to the CodePipeline console that provides a deep link to the\n resources of the external system, such as the configuration page for a CodeDeploy\n deployment group. This link is provided as part of the action display in the\n pipeline.

" + } + }, + "executionUrlTemplate": { + "target": "com.amazonaws.codepipeline#UrlTemplate", + "traits": { + "smithy.api#documentation": "

The URL returned to the CodePipeline console that contains a link to the\n top-level landing page for the external system, such as the console page for CodeDeploy.\n This link is shown on the pipeline view page in the CodePipeline console and\n provides a link to the execution entity of the external action.

" + } + }, + "revisionUrlTemplate": { + "target": "com.amazonaws.codepipeline#UrlTemplate", + "traits": { + "smithy.api#documentation": "

The URL returned to the CodePipeline console that contains a link to the page\n where customers can update or change the configuration of the external action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Returns information about the settings for a rule type.

" + } + }, + "com.amazonaws.codepipeline#S3ArtifactLocation": { + "type": "structure", + "members": { + "bucketName": { + "target": "com.amazonaws.codepipeline#S3BucketName", + "traits": { + "smithy.api#documentation": "

The name of the S3 bucket.

", + "smithy.api#required": {} + } + }, + "objectKey": { + "target": "com.amazonaws.codepipeline#S3ObjectKey", + "traits": { + "smithy.api#documentation": "

The key of the object in the S3 bucket, which uniquely identifies the object in the\n bucket.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The location of the S3 bucket that contains a revision.

" + } + }, + "com.amazonaws.codepipeline#S3Bucket": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 63 + } + } + }, + "com.amazonaws.codepipeline#S3BucketName": { + "type": "string" + }, + "com.amazonaws.codepipeline#S3Key": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.codepipeline#S3Location": { + "type": "structure", + "members": { + "bucket": { + "target": "com.amazonaws.codepipeline#S3Bucket", + "traits": { + "smithy.api#documentation": "

The Amazon S3 artifact bucket for an action's artifacts.

" + } + }, + "key": { + "target": "com.amazonaws.codepipeline#S3Key", + "traits": { + "smithy.api#documentation": "

The artifact name.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The Amazon S3 artifact location for an action's artifacts.

" + } + }, + "com.amazonaws.codepipeline#S3ObjectKey": { + "type": "string" + }, + "com.amazonaws.codepipeline#SecretAccessKey": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.codepipeline#ServicePrincipal": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.codepipeline#SessionToken": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.codepipeline#SourceRevision": { + "type": "structure", + "members": { + "actionName": { + "target": "com.amazonaws.codepipeline#ActionName", + "traits": { + "smithy.api#documentation": "

The name of the action that processed the revision to the source\n artifact.

", + "smithy.api#required": {} + } + }, + "revisionId": { + "target": "com.amazonaws.codepipeline#Revision", + "traits": { + "smithy.api#documentation": "

The system-generated unique ID that identifies the revision number of the\n artifact.

" + } + }, + "revisionSummary": { + "target": "com.amazonaws.codepipeline#RevisionSummary", + "traits": { + "smithy.api#documentation": "

Summary information about the most recent revision of the artifact. For GitHub and\n CodeCommit repositories, the commit message. For Amazon S3 buckets or actions,\n the user-provided content of a codepipeline-artifact-revision-summary key\n specified in the object metadata.

" + } + }, + "revisionUrl": { + "target": "com.amazonaws.codepipeline#Url", + "traits": { + "smithy.api#documentation": "

The commit ID for the artifact revision. For artifacts stored in GitHub or\n CodeCommit repositories, the commit ID is linked to a commit details page.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the version (or revision) of a source artifact that initiated a\n pipeline execution.

" + } + }, + "com.amazonaws.codepipeline#SourceRevisionList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#SourceRevision" + } + }, + "com.amazonaws.codepipeline#SourceRevisionOverride": { + "type": "structure", + "members": { + "actionName": { + "target": "com.amazonaws.codepipeline#ActionName", + "traits": { + "smithy.api#documentation": "

The name of the action where the override will be applied.

", + "smithy.api#required": {} + } + }, + "revisionType": { + "target": "com.amazonaws.codepipeline#SourceRevisionType", + "traits": { + "smithy.api#documentation": "

The type of source revision, based on the source provider. For example, the revision\n type for the CodeCommit action provider is the commit ID.

", + "smithy.api#required": {} + } + }, + "revisionValue": { + "target": "com.amazonaws.codepipeline#Revision", + "traits": { + "smithy.api#documentation": "

The source revision, or version of your source artifact, with the changes that you\n want to run in the pipeline execution.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A list that allows you to specify, or override, the source revision for a pipeline\n execution that's being started. A source revision is the version with all the changes to\n your application code, or source artifact, for the pipeline execution.

\n \n

For the S3_OBJECT_VERSION_ID and S3_OBJECT_KEY types of source revisions, either\n of the types can be used independently, or they can be used together to override the\n source with a specific ObjectKey and VersionID.

\n
" + } + }, + "com.amazonaws.codepipeline#SourceRevisionOverrideList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#SourceRevisionOverride" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.codepipeline#SourceRevisionType": { + "type": "enum", + "members": { + "COMMIT_ID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMMIT_ID" + } + }, + "IMAGE_DIGEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IMAGE_DIGEST" + } + }, + "S3_OBJECT_VERSION_ID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "S3_OBJECT_VERSION_ID" + } + }, + "S3_OBJECT_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "S3_OBJECT_KEY" + } + } + } + }, + "com.amazonaws.codepipeline#StageActionDeclarationList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#ActionDeclaration" + } + }, + "com.amazonaws.codepipeline#StageBlockerDeclarationList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#BlockerDeclaration" + } + }, + "com.amazonaws.codepipeline#StageConditionState": { + "type": "structure", + "members": { + "latestExecution": { + "target": "com.amazonaws.codepipeline#StageConditionsExecution", + "traits": { + "smithy.api#documentation": "

Represents information about the latest run of a condition for a stage.

" + } + }, + "conditionStates": { + "target": "com.amazonaws.codepipeline#ConditionStateList", + "traits": { + "smithy.api#documentation": "

The states of the conditions for a run of a condition for a stage.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The state of a run of a condition for a stage.

" + } + }, + "com.amazonaws.codepipeline#StageConditionsExecution": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.codepipeline#ConditionExecutionStatus", + "traits": { + "smithy.api#documentation": "

The status of a run of a condition for a stage.

" + } + }, + "summary": { + "target": "com.amazonaws.codepipeline#ExecutionSummary", + "traits": { + "smithy.api#documentation": "

A summary of the run of the condition for a stage.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Represents information about the run of a condition for a stage.

" + } + }, + "com.amazonaws.codepipeline#StageContext": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.codepipeline#StageName", + "traits": { + "smithy.api#documentation": "

The name of the stage.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Represents information about a stage to a job worker.

" + } + }, + "com.amazonaws.codepipeline#StageDeclaration": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.codepipeline#StageName", + "traits": { + "smithy.api#documentation": "

The name of the stage.

", + "smithy.api#required": {} + } + }, + "blockers": { + "target": "com.amazonaws.codepipeline#StageBlockerDeclarationList", + "traits": { + "smithy.api#documentation": "

Reserved for future use.

" + } + }, + "actions": { + "target": "com.amazonaws.codepipeline#StageActionDeclarationList", + "traits": { + "smithy.api#documentation": "

The actions included in a stage.

", + "smithy.api#required": {} + } + }, + "onFailure": { + "target": "com.amazonaws.codepipeline#FailureConditions", + "traits": { + "smithy.api#documentation": "

The method to use when a stage has not completed successfully. For example,\n configuring this field for rollback will roll back a failed stage automatically to the\n last successful pipeline execution in the stage.

" + } + }, + "onSuccess": { + "target": "com.amazonaws.codepipeline#SuccessConditions", + "traits": { + "smithy.api#documentation": "

The method to use when a stage has succeeded. For example,\n configuring this field for conditions will allow the stage to succeed when the conditions are met.

" + } + }, + "beforeEntry": { + "target": "com.amazonaws.codepipeline#BeforeEntryConditions", + "traits": { + "smithy.api#documentation": "

The method to use when a stage allows entry. For example, configuring this field for conditions will allow entry to the stage when the conditions are met.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Represents information about a stage and its definition.

" + } + }, + "com.amazonaws.codepipeline#StageExecution": { + "type": "structure", + "members": { + "pipelineExecutionId": { + "target": "com.amazonaws.codepipeline#PipelineExecutionId", + "traits": { + "smithy.api#documentation": "

The ID of the pipeline execution associated with the stage.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.codepipeline#StageExecutionStatus", + "traits": { + "smithy.api#documentation": "

The status of the stage, or for a completed stage, the last status of the\n stage.

\n \n

A status of cancelled means that the pipeline’s definition was updated before the\n stage execution could be completed.

\n
", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.codepipeline#ExecutionType", + "traits": { + "smithy.api#documentation": "

The type of pipeline execution for the stage, such as a rollback pipeline\n execution.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Represents information about the run of a stage.

" + } + }, + "com.amazonaws.codepipeline#StageExecutionList": { "type": "list", "member": { "target": "com.amazonaws.codepipeline#StageExecution" @@ -7724,6 +8908,24 @@ "traits": { "smithy.api#documentation": "

Information about the latest execution in the stage, including its ID and\n status.

" } + }, + "beforeEntryConditionState": { + "target": "com.amazonaws.codepipeline#StageConditionState", + "traits": { + "smithy.api#documentation": "

The state of the entry conditions for a stage.

" + } + }, + "onSuccessConditionState": { + "target": "com.amazonaws.codepipeline#StageConditionState", + "traits": { + "smithy.api#documentation": "

The state of the success conditions for a stage.

" + } + }, + "onFailureConditionState": { + "target": "com.amazonaws.codepipeline#StageConditionState", + "traits": { + "smithy.api#documentation": "

The state of the failure conditions for a stage.

" + } } }, "traits": { @@ -7964,6 +9166,21 @@ "smithy.api#documentation": "

Filter for pipeline executions that have successfully completed the stage in the\n current pipeline version.

" } }, + "com.amazonaws.codepipeline#SuccessConditions": { + "type": "structure", + "members": { + "conditions": { + "target": "com.amazonaws.codepipeline#ConditionList", + "traits": { + "smithy.api#documentation": "

The conditions that are success conditions.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The conditions for making checks that, if met, succeed a stage.

" + } + }, "com.amazonaws.codepipeline#Tag": { "type": "structure", "members": { diff --git a/models/cognito-identity-provider.json b/models/cognito-identity-provider.json index a10adaa7e3..553b427ce9 100644 --- a/models/cognito-identity-provider.json +++ b/models/cognito-identity-provider.json @@ -366,7 +366,7 @@ "name": "cognito-idp" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

With the Amazon Cognito user pools API, you can configure user pools and authenticate users. To\n authenticate users from third-party identity providers (IdPs) in this API, you can\n link IdP users to native user profiles. Learn more\n about the authentication and authorization of federated users at Adding user pool sign-in through a third party and in the User pool federation endpoints and hosted UI reference.

\n

This API reference provides detailed information about API operations and object types\n in Amazon Cognito.

\n

Along with resource management operations, the Amazon Cognito user pools API includes classes\n of operations and authorization models for client-side and server-side authentication of\n users. You can interact with operations in the Amazon Cognito user pools API as any of the\n following subjects.

\n
    \n
  1. \n

    An administrator who wants to configure user pools, app clients, users,\n groups, or other user pool functions.

    \n
  2. \n
  3. \n

    A server-side app, like a web application, that wants to use its Amazon Web Services\n privileges to manage, authenticate, or authorize a user.

    \n
  4. \n
  5. \n

    A client-side app, like a mobile app, that wants to make unauthenticated\n requests to manage, authenticate, or authorize a user.

    \n
  6. \n
\n

For more information, see Using the Amazon Cognito user pools API and user pool endpoints\n in the Amazon Cognito Developer Guide.

\n

With your Amazon Web Services SDK, you can build the logic to support operational flows in every use\n case for this API. You can also make direct REST API requests to Amazon Cognito user pools service endpoints. The following links can get you started\n with the CognitoIdentityProvider client in other supported Amazon Web Services\n SDKs.

\n \n

To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services\n SDKs.

", + "smithy.api#documentation": "

With the Amazon Cognito user pools API, you can configure user pools and authenticate users. To\n authenticate users from third-party identity providers (IdPs) in this API, you can\n link IdP users to native user profiles. Learn more\n about the authentication and authorization of federated users at Adding user pool sign-in through a third party and in the User pool federation endpoints and hosted UI reference.

\n

This API reference provides detailed information about API operations and object types\n in Amazon Cognito.

\n

Along with resource management operations, the Amazon Cognito user pools API includes classes\n of operations and authorization models for client-side and server-side authentication of\n users. You can interact with operations in the Amazon Cognito user pools API as any of the\n following subjects.

\n
    \n
  1. \n

    An administrator who wants to configure user pools, app clients, users,\n groups, or other user pool functions.

    \n
  2. \n
  3. \n

    A server-side app, like a web application, that wants to use its Amazon Web Services\n privileges to manage, authenticate, or authorize a user.

    \n
  4. \n
  5. \n

    A client-side app, like a mobile app, that wants to make unauthenticated\n requests to manage, authenticate, or authorize a user.

    \n
  6. \n
\n

For more information, see Using the Amazon Cognito user pools API and user pool endpoints\n in the Amazon Cognito Developer Guide.

\n

With your Amazon Web Services SDK, you can build the logic to support operational flows in every use\n case for this API. You can also make direct REST API requests to Amazon Cognito user pools service endpoints. The following links can get you started\n with the CognitoIdentityProvider client in other supported Amazon Web Services\n SDKs.

\n \n

To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services\n SDKs.

", "smithy.api#title": "Amazon Cognito Identity Provider", "smithy.api#xmlNamespace": { "uri": "http://cognito-idp.amazonaws.com/doc/2016-04-18/" @@ -1570,7 +1570,7 @@ } ], "traits": { - "smithy.api#documentation": "

This IAM-authenticated API operation provides a code that Amazon Cognito sent to your user\n when they signed up in your user pool. After your user enters their code, they confirm\n ownership of the email address or phone number that they provided, and their user\n account becomes active. Depending on your user pool configuration, your users will\n receive their confirmation code in an email or SMS message.

\n

Local users who signed up in your user pool are the only type of user who can confirm\n sign-up with a code. Users who federate through an external identity provider (IdP) have\n already been confirmed by their IdP. Administrator-created users confirm their accounts\n when they respond to their invitation email message and choose a password.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

This IAM-authenticated API operation confirms user sign-up as an administrator.\n Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation.\n No confirmation code is required.

\n

This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can\n configure your user pool to not send confirmation codes to new users and instead confirm\n them with this API operation on the back end.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminConfirmSignUpRequest": { @@ -1669,7 +1669,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new user in the specified user pool.

\n

If MessageAction isn't set, the default is to send a welcome message via\n email or phone (SMS).

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

This message is based on a template that you configured in your call to create or\n update a user pool. This template includes your custom sign-up instructions and\n placeholders for user name and temporary password.

\n

Alternatively, you can call AdminCreateUser with SUPPRESS\n for the MessageAction parameter, and Amazon Cognito won't send any email.

\n

In either case, the user will be in the FORCE_CHANGE_PASSWORD state until\n they sign in and change their password.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", + "smithy.api#documentation": "

Creates a new user in the specified user pool.

\n

If MessageAction isn't set, the default is to send a welcome message via\n email or phone (SMS).

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

This message is based on a template that you configured in your call to create or\n update a user pool. This template includes your custom sign-up instructions and\n placeholders for user name and temporary password.

\n

Alternatively, you can call AdminCreateUser with SUPPRESS\n for the MessageAction parameter, and Amazon Cognito won't send any email.

\n

In either case, the user will be in the FORCE_CHANGE_PASSWORD state until\n they sign in and change their password.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", "smithy.api#examples": [ { "title": "An AdminCreateUser request for for a test user named John.", @@ -2481,7 +2481,7 @@ } ], "traits": { - "smithy.api#documentation": "

Initiates the authentication flow, as an administrator.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Initiates the authentication flow, as an administrator.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminInitiateAuthRequest": { @@ -3031,7 +3031,7 @@ } ], "traits": { - "smithy.api#documentation": "

Resets the specified user's password in a user pool as an administrator. Works on any\n user.

\n

To use this API operation, your user pool must have self-service account recovery\n configured. Use AdminSetUserPassword if you manage passwords as an administrator.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Deactivates a user's password, requiring them to change it. If a user tries to sign in\n after the API is called, Amazon Cognito responds with a\n PasswordResetRequiredException error. Your app must then perform the\n actions that reset your user's password: the forgot-password flow. In addition, if the\n user pool has phone verification selected and a verified phone number exists for the\n user, or if email verification is selected and a verified email exists for the user,\n calling this API will also result in sending a message to the end user with the code to\n change their password.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Resets the specified user's password in a user pool as an administrator. Works on any\n user.

\n

To use this API operation, your user pool must have self-service account recovery\n configured. Use AdminSetUserPassword if you manage passwords as an administrator.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Deactivates a user's password, requiring them to change it. If a user tries to sign in\n after the API is called, Amazon Cognito responds with a\n PasswordResetRequiredException error. Your app must then perform the\n actions that reset your user's password: the forgot-password flow. In addition, if the\n user pool has phone verification selected and a verified phone number exists for the\n user, or if email verification is selected and a verified email exists for the user,\n calling this API will also result in sending a message to the end user with the code to\n change their password.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminResetUserPasswordRequest": { @@ -3116,6 +3116,9 @@ { "target": "com.amazonaws.cognitoidentityprovider#NotAuthorizedException" }, + { + "target": "com.amazonaws.cognitoidentityprovider#PasswordHistoryPolicyViolationException" + }, { "target": "com.amazonaws.cognitoidentityprovider#PasswordResetRequiredException" }, @@ -3142,7 +3145,7 @@ } ], "traits": { - "smithy.api#documentation": "

Some API operations in a user pool generate a challenge, like a prompt for an MFA\n code, for device authentication that bypasses MFA, or for a custom authentication\n challenge. An AdminRespondToAuthChallenge API request provides the answer\n to that challenge, like a code or a secure remote password (SRP). The parameters of a\n response to an authentication challenge vary with the type of challenge.

\n

For more information about custom authentication challenges, see Custom\n authentication challenge Lambda triggers.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Some API operations in a user pool generate a challenge, like a prompt for an MFA\n code, for device authentication that bypasses MFA, or for a custom authentication\n challenge. An AdminRespondToAuthChallenge API request provides the answer\n to that challenge, like a code or a secure remote password (SRP). The parameters of a\n response to an authentication challenge vary with the type of challenge.

\n

For more information about custom authentication challenges, see Custom\n authentication challenge Lambda triggers.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminRespondToAuthChallengeRequest": { @@ -3335,6 +3338,9 @@ { "target": "com.amazonaws.cognitoidentityprovider#NotAuthorizedException" }, + { + "target": "com.amazonaws.cognitoidentityprovider#PasswordHistoryPolicyViolationException" + }, { "target": "com.amazonaws.cognitoidentityprovider#ResourceNotFoundException" }, @@ -3666,7 +3672,7 @@ } ], "traits": { - "smithy.api#documentation": "\n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Updates the specified user's attributes, including developer attributes, as an\n administrator. Works on any user. To delete an attribute from your user, submit the\n attribute in your API request with a blank value.

\n

For custom attributes, you must prepend the custom: prefix to the\n attribute name.

\n

In addition to updating user attributes, this API can also be used to mark phone and\n email as verified.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "\n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Updates the specified user's attributes, including developer attributes, as an\n administrator. Works on any user. To delete an attribute from your user, submit the\n attribute in your API request with a blank value.

\n

For custom attributes, you must prepend the custom: prefix to the\n attribute name.

\n

In addition to updating user attributes, this API can also be used to mark phone and\n email as verified.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminUpdateUserAttributesRequest": { @@ -3776,6 +3782,37 @@ "smithy.api#output": {} } }, + "com.amazonaws.cognitoidentityprovider#AdvancedSecurityAdditionalFlowsType": { + "type": "structure", + "members": { + "CustomAuthMode": { + "target": "com.amazonaws.cognitoidentityprovider#AdvancedSecurityEnabledModeType", + "traits": { + "smithy.api#documentation": "

The operating mode of advanced security features in custom authentication with \n \n Custom authentication challenge Lambda triggers.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Advanced security configuration options for additional authentication types in your\n user pool, including custom\n authentication.

" + } + }, + "com.amazonaws.cognitoidentityprovider#AdvancedSecurityEnabledModeType": { + "type": "enum", + "members": { + "AUDIT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUDIT" + } + }, + "ENFORCED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENFORCED" + } + } + } + }, "com.amazonaws.cognitoidentityprovider#AdvancedSecurityModeType": { "type": "enum", "members": { @@ -3940,7 +3977,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA)\n for a user, with a unique private key that Amazon Cognito generates and returns in the API\n response. You can authorize an AssociateSoftwareToken request with either\n the user's access token, or a session string from a challenge response that you received\n from Amazon Cognito.

\n \n

Amazon Cognito disassociates an existing software token when you verify the new token in a\n VerifySoftwareToken API request. If you don't verify the software\n token and your user pool doesn't require MFA, the user can then authenticate with\n user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito\n generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge\n each time your user signs. Complete setup with AssociateSoftwareToken\n and VerifySoftwareToken.

\n

After you set up software token MFA for your user, Amazon Cognito generates a\n SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to\n this challenge with your user's TOTP.

\n
\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", + "smithy.api#documentation": "

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA)\n for a user, with a unique private key that Amazon Cognito generates and returns in the API\n response. You can authorize an AssociateSoftwareToken request with either\n the user's access token, or a session string from a challenge response that you received\n from Amazon Cognito.

\n \n

Amazon Cognito disassociates an existing software token when you verify the new token in a\n VerifySoftwareToken API request. If you don't verify the software\n token and your user pool doesn't require MFA, the user can then authenticate with\n user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito\n generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge\n each time your user signs in. Complete setup with\n AssociateSoftwareToken and VerifySoftwareToken.

\n

After you set up software token MFA for your user, Amazon Cognito generates a\n SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to\n this challenge with your user's TOTP.

\n
\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", "smithy.api#optionalAuth": {} } }, @@ -4480,6 +4517,9 @@ { "target": "com.amazonaws.cognitoidentityprovider#NotAuthorizedException" }, + { + "target": "com.amazonaws.cognitoidentityprovider#PasswordHistoryPolicyViolationException" + }, { "target": "com.amazonaws.cognitoidentityprovider#PasswordResetRequiredException" }, @@ -4607,7 +4647,7 @@ } }, "traits": { - "smithy.api#documentation": "

The CloudWatch logging destination of a user pool detailed activity logging\n configuration.

" + "smithy.api#documentation": "

Configuration for the CloudWatch log group destination of user pool detailed activity\n logging, or of user activity log export with advanced security features.

" } }, "com.amazonaws.cognitoidentityprovider#CodeDeliveryDetailsListType": { @@ -4895,6 +4935,9 @@ { "target": "com.amazonaws.cognitoidentityprovider#NotAuthorizedException" }, + { + "target": "com.amazonaws.cognitoidentityprovider#PasswordHistoryPolicyViolationException" + }, { "target": "com.amazonaws.cognitoidentityprovider#ResourceNotFoundException" }, @@ -5561,7 +5604,7 @@ } ], "traits": { - "smithy.api#documentation": "\n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Creates a new Amazon Cognito user pool and sets the password policy for the\n pool.

\n \n

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", + "smithy.api#documentation": "\n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Creates a new Amazon Cognito user pool and sets the password policy for the\n pool.

\n \n

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", "smithy.api#examples": [ { "title": "Example user pool with email and username sign-in", @@ -6297,7 +6340,7 @@ "PreventUserExistenceErrors": { "target": "com.amazonaws.cognitoidentityprovider#PreventUserExistenceErrorTypes", "traits": { - "smithy.api#documentation": "

Errors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to ENABLED and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY, those APIs return a\n UserNotFoundException exception if the user doesn't exist in the user\n pool.

\n

Valid values include:

\n
    \n
  • \n

    \n ENABLED - This prevents user existence-related errors.

    \n
  • \n
  • \n

    \n LEGACY - This represents the early behavior of Amazon Cognito where user\n existence related errors aren't prevented.

    \n
  • \n
" + "smithy.api#documentation": "

Errors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to ENABLED and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY, those APIs return a\n UserNotFoundException exception if the user doesn't exist in the user\n pool.

\n

Valid values include:

\n
    \n
  • \n

    \n ENABLED - This prevents user existence-related errors.

    \n
  • \n
  • \n

    \n LEGACY - This represents the early behavior of Amazon Cognito where user\n existence related errors aren't prevented.

    \n
  • \n
\n

Defaults to LEGACY when you don't provide a value.

" } }, "EnableTokenRevocation": { @@ -8188,6 +8231,12 @@ "traits": { "smithy.api#enumValue": "userNotification" } + }, + "USER_AUTH_EVENTS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "userAuthEvents" + } } } }, @@ -8318,6 +8367,20 @@ } } }, + "com.amazonaws.cognitoidentityprovider#FirehoseConfigurationType": { + "type": "structure", + "members": { + "StreamArn": { + "target": "com.amazonaws.cognitoidentityprovider#ArnType", + "traits": { + "smithy.api#documentation": "

The ARN of an Amazon Data Firehose stream that's the destination for advanced security\n features log export.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration for the Amazon Data Firehose stream destination of user activity log export with\n advanced security features.

" + } + }, "com.amazonaws.cognitoidentityprovider#ForbiddenException": { "type": "structure", "members": { @@ -8465,7 +8528,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Calling this API causes a message to be sent to the end user with a confirmation code\n that is required to change the user's password. For the Username parameter,\n you can use the username or user alias. The method used to send the confirmation code is\n sent according to the specified AccountRecoverySetting. For more information, see Recovering\n User Accounts in the Amazon Cognito Developer Guide. To\n use the confirmation code for resetting the password, call ConfirmForgotPassword.

\n

If neither a verified phone number nor a verified email exists, this API returns\n InvalidParameterException. If your app client has a client secret and\n you don't provide a SECRET_HASH parameter, this API returns\n NotAuthorizedException.

\n

To use this API operation, your user pool must have self-service account recovery\n configured. Use AdminSetUserPassword if you manage passwords as an administrator.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", + "smithy.api#documentation": "

Calling this API causes a message to be sent to the end user with a confirmation code\n that is required to change the user's password. For the Username parameter,\n you can use the username or user alias. The method used to send the confirmation code is\n sent according to the specified AccountRecoverySetting. For more information, see Recovering\n User Accounts in the Amazon Cognito Developer Guide. To\n use the confirmation code for resetting the password, call ConfirmForgotPassword.

\n

If neither a verified phone number nor a verified email exists, this API returns\n InvalidParameterException. If your app client has a client secret and\n you don't provide a SECRET_HASH parameter, this API returns\n NotAuthorizedException.

\n

To use this API operation, your user pool must have self-service account recovery\n configured. Use AdminSetUserPassword if you manage passwords as an administrator.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", "smithy.api#optionalAuth": {} } }, @@ -8844,7 +8907,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets the detailed activity logging configuration for a user pool.

" + "smithy.api#documentation": "

Gets the logging configuration of a user pool.

" } }, "com.amazonaws.cognitoidentityprovider#GetLogDeliveryConfigurationRequest": { @@ -8853,7 +8916,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The ID of the user pool where you want to view detailed activity logging\n configuration.

", + "smithy.api#documentation": "

The ID of the user pool that has the logging configuration that you want to\n view.

", "smithy.api#required": {} } } @@ -8868,7 +8931,7 @@ "LogDeliveryConfiguration": { "target": "com.amazonaws.cognitoidentityprovider#LogDeliveryConfigurationType", "traits": { - "smithy.api#documentation": "

The detailed activity logging configuration of the requested user pool.

" + "smithy.api#documentation": "

The logging configuration of the requested user pool.

" } } }, @@ -9101,7 +9164,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Generates a user attribute verification code for the specified attribute name. Sends a\n message to a user with a code that they must return in a VerifyUserAttribute\n request.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", + "smithy.api#documentation": "

Generates a user attribute verification code for the specified attribute name. Sends a\n message to a user with a code that they must return in a VerifyUserAttribute\n request.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", "smithy.api#optionalAuth": {} } }, @@ -9648,7 +9711,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user\n with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", + "smithy.api#documentation": "

Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user\n with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", "smithy.api#examples": [ { "title": "Example username and password sign-in for a user who has TOTP MFA", @@ -9890,7 +9953,7 @@ } }, "traits": { - "smithy.api#documentation": "

This exception is thrown when the trust relationship is not valid for the role\n provided for SMS configuration. This can happen if you don't trust\n cognito-idp.amazonaws.com or the external ID provided in the role does\n not match what is provided in the SMS configuration for the user pool.

", + "smithy.api#documentation": "

This exception is thrown when the trust relationship is not valid for the role\n provided for SMS configuration. This can happen if you don't trust\n cognito-idp.amazonaws.com or the external ID provided in the role does\n not match what is provided in the SMS configuration for the user pool.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -10962,7 +11025,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 1 + "max": 2 } } }, @@ -10972,21 +11035,33 @@ "LogLevel": { "target": "com.amazonaws.cognitoidentityprovider#LogLevel", "traits": { - "smithy.api#documentation": "

The errorlevel selection of logs that a user pool sends for detailed\n activity logging.

", + "smithy.api#documentation": "

The errorlevel selection of logs that a user pool sends for detailed\n activity logging. To send userNotification activity with information about message delivery, choose ERROR with\n CloudWatchLogsConfiguration. To send userAuthEvents\n activity with user logs from advanced security features, choose INFO with\n one of CloudWatchLogsConfiguration, FirehoseConfiguration, or\n S3Configuration.

", "smithy.api#required": {} } }, "EventSource": { "target": "com.amazonaws.cognitoidentityprovider#EventSourceName", "traits": { - "smithy.api#documentation": "

The source of events that your user pool sends for detailed activity logging.

", + "smithy.api#documentation": "

The source of events that your user pool sends for logging. To send error-level logs\n about user notification activity, set to userNotification. To send\n info-level logs about advanced security features user activity, set to\n userAuthEvents.

", "smithy.api#required": {} } }, "CloudWatchLogsConfiguration": { "target": "com.amazonaws.cognitoidentityprovider#CloudWatchLogsConfigurationType", "traits": { - "smithy.api#documentation": "

The CloudWatch logging destination of a user pool.

" + "smithy.api#documentation": "

The CloudWatch log group destination of user pool detailed activity logs, or of user\n activity log export with advanced security features.

" + } + }, + "S3Configuration": { + "target": "com.amazonaws.cognitoidentityprovider#S3ConfigurationType", + "traits": { + "smithy.api#documentation": "

The Amazon S3 bucket destination of user activity log export with advanced security\n features. To activate this setting, \n advanced security features must be active in your user pool.

" + } + }, + "FirehoseConfiguration": { + "target": "com.amazonaws.cognitoidentityprovider#FirehoseConfigurationType", + "traits": { + "smithy.api#documentation": "

The Amazon Data Firehose stream destination of user activity log export with advanced security\n features. To activate this setting, \n advanced security features must be active in your user pool.

" } } }, @@ -11000,20 +11075,20 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The ID of the user pool where you configured detailed activity logging.

", + "smithy.api#documentation": "

The ID of the user pool where you configured logging.

", "smithy.api#required": {} } }, "LogConfigurations": { "target": "com.amazonaws.cognitoidentityprovider#LogConfigurationListType", "traits": { - "smithy.api#documentation": "

The detailed activity logging destination of a user pool.

", + "smithy.api#documentation": "

A logging destination of a user pool. User pools can have multiple logging\n destinations for message-delivery and user-activity logs.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The logging parameters of a user pool.

" + "smithy.api#documentation": "

The logging parameters of a user pool returned in response to\n GetLogDeliveryConfiguration.

" } }, "com.amazonaws.cognitoidentityprovider#LogLevel": { @@ -11024,6 +11099,12 @@ "traits": { "smithy.api#enumValue": "ERROR" } + }, + "INFO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INFO" + } } } }, @@ -11315,6 +11396,28 @@ "smithy.api#pattern": "^[\\S]+$" } }, + "com.amazonaws.cognitoidentityprovider#PasswordHistoryPolicyViolationException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.cognitoidentityprovider#MessageType" + } + }, + "traits": { + "smithy.api#documentation": "

The message returned when a user's new password matches a previous password and \n doesn't comply with the password-history policy.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.cognitoidentityprovider#PasswordHistorySizeType": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 24 + } + } + }, "com.amazonaws.cognitoidentityprovider#PasswordPolicyMinLengthType": { "type": "integer", "traits": { @@ -11361,6 +11464,12 @@ "smithy.api#documentation": "

In the password policy that you have set, refers to whether you have required users to\n use at least one symbol in their password.

" } }, + "PasswordHistorySize": { + "target": "com.amazonaws.cognitoidentityprovider#PasswordHistorySizeType", + "traits": { + "smithy.api#documentation": "

The number of previous passwords that you want Amazon Cognito to restrict each user from\n reusing. Users can't set a password that matches any of n previous\n passwords, where n is the value of PasswordHistorySize.

\n

Password history isn't enforced and isn't displayed in DescribeUserPool responses when you set this value to\n 0 or don't provide it. To activate this setting, \n advanced security features must be active in your user pool.

" + } + }, "TemporaryPasswordValidityDays": { "target": "com.amazonaws.cognitoidentityprovider#TemporaryPasswordValidityDaysType", "traits": { @@ -11767,7 +11876,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Resends the confirmation (for confirmation of registration) to a specific user in the\n user pool.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", + "smithy.api#documentation": "

Resends the confirmation (for confirmation of registration) to a specific user in the\n user pool.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", "smithy.api#optionalAuth": {} } }, @@ -12008,6 +12117,9 @@ { "target": "com.amazonaws.cognitoidentityprovider#NotAuthorizedException" }, + { + "target": "com.amazonaws.cognitoidentityprovider#PasswordHistoryPolicyViolationException" + }, { "target": "com.amazonaws.cognitoidentityprovider#PasswordResetRequiredException" }, @@ -12035,7 +12147,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Some API operations in a user pool generate a challenge, like a prompt for an MFA\n code, for device authentication that bypasses MFA, or for a custom authentication\n challenge. A RespondToAuthChallenge API request provides the answer to that\n challenge, like a code or a secure remote password (SRP). The parameters of a response\n to an authentication challenge vary with the type of challenge.

\n

For more information about custom authentication challenges, see Custom\n authentication challenge Lambda triggers.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", + "smithy.api#documentation": "

Some API operations in a user pool generate a challenge, like a prompt for an MFA\n code, for device authentication that bypasses MFA, or for a custom authentication\n challenge. A RespondToAuthChallenge API request provides the answer to that\n challenge, like a code or a secure remote password (SRP). The parameters of a response\n to an authentication challenge vary with the type of challenge.

\n

For more information about custom authentication challenges, see Custom\n authentication challenge Lambda triggers.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", "smithy.api#optionalAuth": {} } }, @@ -12307,6 +12419,16 @@ } } }, + "com.amazonaws.cognitoidentityprovider#S3ArnType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 1024 + }, + "smithy.api#pattern": "^arn:[\\w+=/,.@-]+:[\\w+=/,.@-]+:::[\\w+=/,.@-]+(:[\\w+=/,.@-]+)?(:[\\w+=/,.@-]+)?$" + } + }, "com.amazonaws.cognitoidentityprovider#S3BucketType": { "type": "string", "traits": { @@ -12317,6 +12439,20 @@ "smithy.api#pattern": "^[0-9A-Za-z\\.\\-_]*(?The ARN of an Amazon S3 bucket that's the destination for advanced security features\n log export.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration for the Amazon S3 bucket destination of user activity log export with\n advanced security features.

" + } + }, "com.amazonaws.cognitoidentityprovider#SESConfigurationSet": { "type": "string", "traits": { @@ -12521,7 +12657,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sets up or modifies the detailed activity logging configuration of a user pool.

" + "smithy.api#documentation": "

Sets up or modifies the logging configuration of a user pool. User pools can export\n user notification logs and advanced security features user activity logs.

" } }, "com.amazonaws.cognitoidentityprovider#SetLogDeliveryConfigurationRequest": { @@ -12530,14 +12666,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The ID of the user pool where you want to configure detailed activity logging .

", + "smithy.api#documentation": "

The ID of the user pool where you want to configure logging.

", "smithy.api#required": {} } }, "LogConfigurations": { "target": "com.amazonaws.cognitoidentityprovider#LogConfigurationListType", "traits": { - "smithy.api#documentation": "

A collection of all of the detailed activity logging configurations for a user\n pool.

", + "smithy.api#documentation": "

A collection of the logging configurations for a user pool.

", "smithy.api#required": {} } } @@ -12838,7 +12974,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sets the user pool multi-factor authentication (MFA) configuration.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
" + "smithy.api#documentation": "

Sets the user pool multi-factor authentication (MFA) configuration.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
" } }, "com.amazonaws.cognitoidentityprovider#SetUserPoolMfaConfigRequest": { @@ -13031,7 +13167,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Registers the user in the specified user pool and creates a user name, password, and\n user attributes.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", + "smithy.api#documentation": "

Registers the user in the specified user pool and creates a user name, password, and\n user attributes.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", "smithy.api#optionalAuth": {} } }, @@ -13121,7 +13257,7 @@ "UserSub": { "target": "com.amazonaws.cognitoidentityprovider#StringType", "traits": { - "smithy.api#documentation": "

The UUID of the authenticated user. This isn't the same as\n username.

", + "smithy.api#documentation": "

The 128-bit ID of the authenticated user. This isn't the same as\n username.

", "smithy.api#required": {} } } @@ -14316,7 +14452,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

With this operation, your users can update one or more of their attributes with their\n own credentials. You authorize this API request with the user's access token. To delete\n an attribute from your user, submit the attribute in your API request with a blank\n value. Custom attribute values in this request must include the custom:\n prefix.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", + "smithy.api#documentation": "

With this operation, your users can update one or more of their attributes with their\n own credentials. You authorize this API request with the user's access token. To delete\n an attribute from your user, submit the attribute in your API request with a blank\n value. Custom attribute values in this request must include the custom:\n prefix.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
", "smithy.api#optionalAuth": {} } }, @@ -14408,7 +14544,7 @@ } ], "traits": { - "smithy.api#documentation": "\n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Updates the specified user pool with the specified attributes. You can get a list of\n the current user pool settings using DescribeUserPool.

\n \n

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "\n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Updates the specified user pool with the specified attributes. You can get a list of\n the current user pool settings using DescribeUserPool.

\n \n

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#UpdateUserPoolClient": { @@ -14567,7 +14703,7 @@ "PreventUserExistenceErrors": { "target": "com.amazonaws.cognitoidentityprovider#PreventUserExistenceErrorTypes", "traits": { - "smithy.api#documentation": "

Errors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to ENABLED and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY, those APIs return a\n UserNotFoundException exception if the user doesn't exist in the user\n pool.

\n

Valid values include:

\n
    \n
  • \n

    \n ENABLED - This prevents user existence-related errors.

    \n
  • \n
  • \n

    \n LEGACY - This represents the early behavior of Amazon Cognito where user\n existence related errors aren't prevented.

    \n
  • \n
" + "smithy.api#documentation": "

Errors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to ENABLED and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY, those APIs return a\n UserNotFoundException exception if the user doesn't exist in the user\n pool.

\n

Valid values include:

\n
    \n
  • \n

    \n ENABLED - This prevents user existence-related errors.

    \n
  • \n
  • \n

    \n LEGACY - This represents the early behavior of Amazon Cognito where user\n existence related errors aren't prevented.

    \n
  • \n
\n

Defaults to LEGACY when you don't provide a value.

" } }, "EnableTokenRevocation": { @@ -15122,9 +15258,15 @@ "AdvancedSecurityMode": { "target": "com.amazonaws.cognitoidentityprovider#AdvancedSecurityModeType", "traits": { - "smithy.api#documentation": "

The operating mode of advanced security features in your user pool.

", + "smithy.api#documentation": "

The operating mode of advanced security features for standard authentication types\n in your user pool, including username-password and secure remote password (SRP)\n authentication.\n

", "smithy.api#required": {} } + }, + "AdvancedSecurityAdditionalFlows": { + "target": "com.amazonaws.cognitoidentityprovider#AdvancedSecurityAdditionalFlowsType", + "traits": { + "smithy.api#documentation": "

Advanced security configuration options for additional authentication types in your\n user pool, including custom\n authentication.

" + } } }, "traits": { @@ -15297,7 +15439,7 @@ "PreventUserExistenceErrors": { "target": "com.amazonaws.cognitoidentityprovider#PreventUserExistenceErrorTypes", "traits": { - "smithy.api#documentation": "

Errors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to ENABLED and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY, those APIs return a\n UserNotFoundException exception if the user doesn't exist in the user\n pool.

\n

Valid values include:

\n
    \n
  • \n

    \n ENABLED - This prevents user existence-related errors.

    \n
  • \n
  • \n

    \n LEGACY - This represents the old behavior of Amazon Cognito where user\n existence related errors aren't prevented.

    \n
  • \n
" + "smithy.api#documentation": "

Errors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to ENABLED and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY, those APIs return a\n UserNotFoundException exception if the user doesn't exist in the user\n pool.

\n

Valid values include:

\n
    \n
  • \n

    \n ENABLED - This prevents user existence-related errors.

    \n
  • \n
  • \n

    \n LEGACY - This represents the early behavior of Amazon Cognito where user\n existence related errors aren't prevented.

    \n
  • \n
\n

Defaults to LEGACY when you don't provide a value.

" } }, "EnableTokenRevocation": { diff --git a/models/compute-optimizer.json b/models/compute-optimizer.json index 6e23f90145..3af0e8d943 100644 --- a/models/compute-optimizer.json +++ b/models/compute-optimizer.json @@ -2328,7 +2328,7 @@ "values": { "target": "com.amazonaws.computeoptimizer#FilterValues", "traits": { - "smithy.api#documentation": "

\n The value of the filter.\n

\n

The valid values for this parameter are as follows:

\n
    \n
  • \n

    If you specify the name parameter as Finding, specify\n Optimized, NotOptimized, or Unavailable.

    \n
  • \n
  • \n

    If you specify the name parameter as FindingReasonCode, specify\n CPUUnderprovisioned, CPUOverprovisioned, \n MemoryUnderprovisioned, or MemoryOverprovisioned.

    \n
  • \n
" + "smithy.api#documentation": "

\n The value of the filter.\n

\n

The valid values for this parameter are as follows:

\n
    \n
  • \n

    If you specify the name parameter as Finding, specify\n Optimized, Underprovisioned, or Overprovisioned.

    \n
  • \n
  • \n

    If you specify the name parameter as FindingReasonCode, specify\n CPUUnderprovisioned, CPUOverprovisioned, \n MemoryUnderprovisioned, or MemoryOverprovisioned.

    \n
  • \n
" } } }, @@ -9361,7 +9361,7 @@ "lookBackPeriod": { "target": "com.amazonaws.computeoptimizer#LookBackPeriodPreference", "traits": { - "smithy.api#documentation": "

\n The preference to control the number of days the utilization metrics of the Amazon Web Services resource are analyzed. \n When this preference isn't specified, we use the default value DAYS_14.\n

\n \n

You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types.

\n
" + "smithy.api#documentation": "

\n The preference to control the number of days the utilization metrics of the Amazon Web Services resource are analyzed. \n When this preference isn't specified, we use the default value DAYS_14.\n

\n

You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types.\n

\n \n
    \n
  • \n

    Amazon EC2 instance lookback preferences can be set at the organization, account, and resource levels.

    \n
  • \n
  • \n

    Auto Scaling group lookback preferences can only be set at the resource level.

    \n
  • \n
\n
" } }, "utilizationPreferences": { diff --git a/models/config-service.json b/models/config-service.json index 9e4f4565fd..ef795d66e7 100644 --- a/models/config-service.json +++ b/models/config-service.json @@ -8689,7 +8689,7 @@ "min": 1, "max": 64 }, - "smithy.api#pattern": "\\S" + "smithy.api#pattern": "^[A-Za-z0-9-_]+$" } }, "com.amazonaws.configservice#OrganizationConfigRuleNames": { diff --git a/models/connect-contact-lens.json b/models/connect-contact-lens.json index f26cec0a2c..d696f32dc8 100644 --- a/models/connect-contact-lens.json +++ b/models/connect-contact-lens.json @@ -66,7 +66,7 @@ "name": "connect" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

Contact Lens for Amazon Connect enables you to analyze conversations between customer and agents,\n by using speech transcription, natural language processing, and intelligent search\n capabilities. It performs sentiment analysis, detects issues, and enables you to automatically\n categorize contacts.

\n

Contact Lens for Amazon Connect provides both real-time and post-call analytics of customer-agent\n conversations. For more information, see Analyze conversations using\n Contact Lens in the Amazon Connect Administrator Guide.

", + "smithy.api#documentation": "\n

Amazon Connect Contact Lens enables you to analyze conversations between customer and agents, by using\n speech transcription, natural language processing, and intelligent search capabilities.\n It performs sentiment analysis, detects issues, and enables you to automatically\n categorize contacts.

\n

Amazon Connect Contact Lens provides both real-time and post-call analytics of customer-agent\n conversations. For more information, see Analyze conversations\n using speech analytics in the Amazon Connect Administrator\n Guide.

", "smithy.api#title": "Amazon Connect Contact Lens", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -810,7 +810,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides the category rules that are used to automatically categorize contacts based on\n uttered keywords and phrases.

" + "smithy.api#documentation": "

Provides the category rules that are used to automatically categorize contacts based\n on uttered keywords and phrases.

" } }, "com.amazonaws.connectcontactlens#CategoryDetails": { @@ -868,7 +868,7 @@ } }, "traits": { - "smithy.api#documentation": "

For characters that were detected as issues, where they occur in the transcript.

" + "smithy.api#documentation": "

For characters that were detected as issues, where they occur in the\n transcript.

" } }, "com.amazonaws.connectcontactlens#ContactId": { @@ -930,7 +930,7 @@ } }, "traits": { - "smithy.api#documentation": "

Potential issues that are detected based on an artificial intelligence analysis of each\n turn in the conversation.

" + "smithy.api#documentation": "

Potential issues that are detected based on an artificial intelligence analysis of\n each turn in the conversation.

" } }, "com.amazonaws.connectcontactlens#IssuesDetected": { @@ -1006,7 +1006,7 @@ "MaxResults": { "target": "com.amazonaws.connectcontactlens#MaxResults", "traits": { - "smithy.api#documentation": "

The maximimum number of results to return per page.

" + "smithy.api#documentation": "

The maximum number of results to return per page.

" } }, "NextToken": { @@ -1034,7 +1034,7 @@ "NextToken": { "target": "com.amazonaws.connectcontactlens#NextToken", "traits": { - "smithy.api#documentation": "

If there are additional results, this is the token for the next set of results. If response includes nextToken there are two possible scenarios:

\n
    \n
  • \n

    There are more segments so another call is required to get them.

    \n
  • \n
  • \n

    There are no more segments at this time, but more may be available later (real-time\n analysis is in progress) so the client should call the operation again to get new\n segments.

    \n
  • \n
\n

If response does not include nextToken, the analysis is completed (successfully or failed) and there are no more segments to retrieve.

" + "smithy.api#documentation": "

If there are additional results, this is the token for the next set of results. If response includes nextToken there are two possible\n scenarios:

\n
    \n
  • \n

    There are more segments so another call is required to get them.

    \n
  • \n
  • \n

    There are no more segments at this time, but more may be available later\n (real-time analysis is in progress) so the client should call the operation\n again to get new segments.

    \n
  • \n
\n

If response does not include nextToken, the analysis is completed\n (successfully or failed) and there are no more segments to retrieve.

" } } }, @@ -1155,6 +1155,95 @@ } } }, + "com.amazonaws.connectcontactlens#PostContactSummary": { + "type": "structure", + "members": { + "Content": { + "target": "com.amazonaws.connectcontactlens#PostContactSummaryContent", + "traits": { + "smithy.api#documentation": "

The content of the summary.

" + } + }, + "Status": { + "target": "com.amazonaws.connectcontactlens#PostContactSummaryStatus", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Whether the summary was successfully COMPLETED or FAILED to be generated.

", + "smithy.api#required": {} + } + }, + "FailureCode": { + "target": "com.amazonaws.connectcontactlens#PostContactSummaryFailureCode", + "traits": { + "smithy.api#documentation": "

If the summary failed to be generated, one of the following failure codes\n occurs:

\n
    \n
  • \n

    \n QUOTA_EXCEEDED: The number of concurrent analytics jobs reached\n your service quota.

    \n
  • \n
  • \n

    \n INSUFFICIENT_CONVERSATION_CONTENT: The conversation needs to have\n at least one turn from both the participants in order to generate the\n summary.

    \n
  • \n
  • \n

    \n FAILED_SAFETY_GUIDELINES: The generated summary cannot be\n provided because it failed to meet system safety guidelines.

    \n
  • \n
  • \n

    \n INVALID_ANALYSIS_CONFIGURATION: This code occurs when, for\n example, you're using a \n language \n that isn't supported by generative AI-powered post-contact summaries.\n

    \n
  • \n
  • \n

    \n INTERNAL_ERROR: Internal system error.

    \n
  • \n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the post-contact summary.

" + } + }, + "com.amazonaws.connectcontactlens#PostContactSummaryContent": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1762 + } + } + }, + "com.amazonaws.connectcontactlens#PostContactSummaryFailureCode": { + "type": "enum", + "members": { + "QUOTA_EXCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUOTA_EXCEEDED" + } + }, + "INSUFFICIENT_CONVERSATION_CONTENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INSUFFICIENT_CONVERSATION_CONTENT" + } + }, + "FAILED_SAFETY_GUIDELINES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED_SAFETY_GUIDELINES" + } + }, + "INVALID_ANALYSIS_CONFIGURATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_ANALYSIS_CONFIGURATION" + } + }, + "INTERNAL_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNAL_ERROR" + } + } + } + }, + "com.amazonaws.connectcontactlens#PostContactSummaryStatus": { + "type": "enum", + "members": { + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + } + } + }, "com.amazonaws.connectcontactlens#RealtimeContactAnalysisSegment": { "type": "structure", "members": { @@ -1169,6 +1258,12 @@ "traits": { "smithy.api#documentation": "

The matched category rules.

" } + }, + "PostContactSummary": { + "target": "com.amazonaws.connectcontactlens#PostContactSummary", + "traits": { + "smithy.api#documentation": "

Information about the post-contact summary.

" + } } }, "traits": { @@ -1255,7 +1350,7 @@ "target": "com.amazonaws.connectcontactlens#ParticipantId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The identifier of the participant.

", + "smithy.api#documentation": "

The identifier of the participant. Valid values are CUSTOMER or AGENT.

", "smithy.api#required": {} } }, @@ -1295,7 +1390,7 @@ "target": "com.amazonaws.connectcontactlens#SentimentValue", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The sentiment of the detected for this piece of transcript.

", + "smithy.api#documentation": "

The sentiment detected for this piece of transcript.

", "smithy.api#required": {} } }, diff --git a/models/connect.json b/models/connect.json index 3e909398a2..4ac501a5db 100644 --- a/models/connect.json +++ b/models/connect.json @@ -789,12 +789,12 @@ "AgentIds": { "target": "com.amazonaws.connect#AgentIds", "traits": { - "smithy.api#documentation": "

An object to specify a list of agents, by Agent ID.

" + "smithy.api#documentation": "

An object to specify a list of agents, by user ID.

" } } }, "traits": { - "smithy.api#documentation": "

Can be used to define a list of preferred agents to target the contact within the queue.\n Note that agents must have the queue in their routing profile in order to be offered the\n contact.

" + "smithy.api#documentation": "

Can be used to define a list of preferred agents to target the contact to within the queue.\n Note that agents must have the queue in their routing profile in order to be offered the\n contact.

" } }, "com.amazonaws.connect#AgentsMinOneMaxHundred": { @@ -829,7 +829,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 2 + "max": 4 } } }, @@ -7150,7 +7150,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Initiates an Amazon Connect instance with all the supported channels enabled. It does\n not attach any storage, such as Amazon Simple Storage Service (Amazon S3) or Amazon Kinesis. It\n also does not allow for any configurations on features, such as Contact Lens for Amazon Connect.

\n

Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. \nIf you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. \nYou must wait 30 days before you can restart creating and deleting instances in your account.

", + "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Initiates an Amazon Connect instance with all the supported channels enabled. It does\n not attach any storage, such as Amazon Simple Storage Service (Amazon S3) or Amazon Kinesis. It\n also does not allow for any configurations on features, such as Contact Lens for Amazon Connect.

\n

For more information, see Create an Amazon Connect\n instance in the Amazon Connect Administrator Guide.

\n

Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. \nIf you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. \nYou must wait 30 days before you can restart creating and deleting instances in your account.

", "smithy.api#http": { "method": "PUT", "uri": "/instance", @@ -7556,7 +7556,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new predefined attribute for the specified Amazon Connect instance.

", + "smithy.api#documentation": "

Creates a new predefined attribute for the specified Amazon Connect instance. Predefined\n attributes are attributes in an Amazon Connect instance that can be used to route\n contacts to an agent or pools of agents within a queue. For more information, see Create\n predefined attributes for routing contacts to agents.

", "smithy.api#http": { "method": "PUT", "uri": "/predefined-attributes/{InstanceId}", @@ -8190,7 +8190,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a security profile.

", + "smithy.api#documentation": "

Creates a security profile.

\n

For information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.

", "smithy.api#http": { "method": "PUT", "uri": "/security-profiles/{InstanceId}", @@ -10088,7 +10088,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Deletes the Amazon Connect instance.

\n

Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. \nIf you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. \nYou must wait 30 days before you can restart creating and deleting instances in your account.

", + "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Deletes the Amazon Connect instance. For more information, see Delete your\n Amazon Connect instance in the Amazon Connect Administrator\n Guide.

\n

Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. \nIf you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. \nYou must wait 30 days before you can restart creating and deleting instances in your account.

", "smithy.api#http": { "method": "DELETE", "uri": "/instance/{InstanceId}", @@ -10316,7 +10316,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a queue.

", + "smithy.api#documentation": "

Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin website.

", "smithy.api#http": { "method": "DELETE", "uri": "/queues/{InstanceId}/{QueueId}", @@ -12027,7 +12027,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a predefined attribute for the specified Amazon Connect instance.

", + "smithy.api#documentation": "

Describes a predefined attribute for the specified Amazon Connect instance. Predefined\n attributes are attributes in an Amazon Connect instance that can be used to route\n contacts to an agent or pools of agents within a queue. For more information, see Create\n predefined attributes for routing contacts to agents.

", "smithy.api#http": { "method": "GET", "uri": "/predefined-attributes/{InstanceId}/{Name}", @@ -12460,7 +12460,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets basic information about the security profle.

", + "smithy.api#documentation": "

Gets basic information about the security profile.

\n

For information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.

", "smithy.api#http": { "method": "GET", "uri": "/security-profiles/{InstanceId}/{SecurityProfileId}", @@ -16915,20 +16915,20 @@ "Filters": { "target": "com.amazonaws.connect#FiltersV2List", "traits": { - "smithy.api#documentation": "

The filters to apply to returned metrics. You can filter on the following resources:

\n
    \n
  • \n

    Agents

    \n
  • \n
  • \n

    Channels

    \n
  • \n
  • \n

    Feature

    \n
  • \n
  • \n

    Queues

    \n
  • \n
  • \n

    Routing profiles

    \n
  • \n
  • \n

    Routing step expression

    \n
  • \n
  • \n

    User hierarchy groups

    \n
  • \n
\n

At least one filter must be passed from queues, routing profiles, agents, or user hierarchy\n groups.

\n

To filter by phone number, see Create a historical\n metrics report in the Amazon Connect Administrator\n Guide.

\n

Note the following limits:

\n
    \n
  • \n

    \n Filter keys: A maximum of 5 filter keys are supported in\n a single request. Valid filter keys: AGENT |\n AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO |\n AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR |\n AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN |\n CASE_STATUS | CHANNEL |\n contact/segmentAttributes/connect:Subtype | FEATURE |\n FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID |\n FLOWS_NEXT_RESOURCE_QUEUE_ID | FLOWS_OUTCOME_TYPE |\n FLOWS_RESOURCE_ID | INITIATION_METHOD |\n RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE |\n ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED |\n

    \n
  • \n
  • \n

    \n Filter values: A maximum of 100 filter values are\n supported in a single request. VOICE, CHAT, and TASK are valid filterValue for the\n CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a\n GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total\n of 100 filter values, along with 3 channel filters.

    \n

    \n contact_lens_conversational_analytics is a valid filterValue for the\n FEATURE filter key. It is available only to contacts analyzed by Contact Lens\n conversational analytics.

    \n

    \n connect:Chat, connect:SMS, connect:Telephony, and\n connect:WebRTC are valid filterValue examples (not exhaustive) for\n the contact/segmentAttributes/connect:Subtype filter key.

    \n

    \n ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000\n length. This filter is case and order sensitive. JSON string fields must be sorted in ascending\n order and JSON array order should be kept as is.

    \n

    \n Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the\n Q_CONNECT_ENABLED filter key.

    \n
      \n
    • \n

      TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow.

      \n
    • \n
    • \n

      FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow

      \n
    • \n
    \n

    This filter is available only for contact record-driven metrics.

    \n
  • \n
", + "smithy.api#documentation": "

The filters to apply to returned metrics. You can filter on the following resources:

\n
    \n
  • \n

    Agents

    \n
  • \n
  • \n

    Campaigns

    \n
  • \n
  • \n

    Channels

    \n
  • \n
  • \n

    Feature

    \n
  • \n
  • \n

    Queues

    \n
  • \n
  • \n

    Routing profiles

    \n
  • \n
  • \n

    Routing step expression

    \n
  • \n
  • \n

    User hierarchy groups

    \n
  • \n
\n

At least one filter must be passed from queues, routing profiles, agents, or user hierarchy\n groups.

\n

For metrics for outbound campaigns analytics, you can also use campaigns to satisfy at least\n one filter requirement.

\n

To filter by phone number, see Create a historical\n metrics report in the Amazon Connect Administrator\n Guide.

\n

Note the following limits:

\n
    \n
  • \n

    \n Filter keys: A maximum of 5 filter keys are supported in\n a single request. Valid filter keys: AGENT |\n AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO |\n AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR |\n AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS |\n CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS |\n CHANNEL | contact/segmentAttributes/connect:Subtype |\n DISCONNECT_REASON | FEATURE | FLOW_TYPE |\n FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID |\n FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID |\n INITIATION_METHOD | RESOURCE_PUBLISHED_TIMESTAMP |\n ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE |\n Q_CONNECT_ENABLED |

    \n
  • \n
  • \n

    \n Filter values: A maximum of 100 filter values are\n supported in a single request. VOICE, CHAT, and TASK are valid filterValue for the\n CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a\n GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total\n of 100 filter values, along with 3 channel filters.

    \n

    \n contact_lens_conversational_analytics is a valid filterValue for the\n FEATURE filter key. It is available only to contacts analyzed by Contact Lens\n conversational analytics.

    \n

    \n connect:Chat, connect:SMS, connect:Telephony, and\n connect:WebRTC are valid filterValue examples (not exhaustive) for\n the contact/segmentAttributes/connect:Subtype filter key.

    \n

    \n ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000\n length. This filter is case and order sensitive. JSON string fields must be sorted in ascending\n order and JSON array order should be kept as is.

    \n

    \n Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the\n Q_CONNECT_ENABLED filter key.

    \n
      \n
    • \n

      TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow.

      \n
    • \n
    • \n

      FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow

      \n
    • \n
    \n

    This filter is available only for contact record-driven metrics.

    \n

    \n Campaign ARNs are valid filterValues for the CAMPAIGN\n filter key.

    \n
  • \n
", "smithy.api#required": {} } }, "Groupings": { "target": "com.amazonaws.connect#GroupingsV2", "traits": { - "smithy.api#documentation": "

The grouping applied to the metrics that are returned. For example, when results are grouped\n by queue, the metrics returned are grouped by queue. The values that are returned apply to the\n metrics for each queue. They are not aggregated for all queues.

\n

If no grouping is specified, a summary of all metrics is returned.

\n

Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE |\n AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE |\n AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE |\n CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL |\n contact/segmentAttributes/connect:Subtype | FLOWS_RESOURCE_ID |\n FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE\n | INITIATION_METHOD | Q_CONNECT_ENABLED | QUEUE |\n RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE |\n ROUTING_STEP_EXPRESSION\n

" + "smithy.api#documentation": "

The grouping applied to the metrics that are returned. For example, when results are grouped\n by queue, the metrics returned are grouped by queue. The values that are returned apply to the\n metrics for each queue. They are not aggregated for all queues.

\n

If no grouping is specified, a summary of all metrics is returned.

\n

Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE |\n AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE |\n AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE |\n ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN |\n CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL |\n contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON |\n FLOWS_RESOURCE_ID | FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE\n | FLOWS_OUTCOME_TYPE | INITIATION_METHOD |\n Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP\n | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION\n

" } }, "Metrics": { "target": "com.amazonaws.connect#MetricsV2", "traits": { - "smithy.api#documentation": "

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The\n following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator\n Guide.

\n
\n
ABANDONMENT_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Abandonment rate\n

\n
\n
AGENT_ADHERENT_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherent time\n

\n
\n
AGENT_ANSWER_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent answer rate\n

\n
\n
AGENT_NON_ADHERENT_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-adherent time\n

\n
\n
AGENT_NON_RESPONSE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent\n non-response\n

\n
\n
AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

\n

UI name: Agent non-response without customer abandons\n

\n
\n
AGENT_OCCUPANCY
\n
\n

Unit: Percentage

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Occupancy\n

\n
\n
AGENT_SCHEDULE_ADHERENCE
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherence\n

\n
\n
AGENT_SCHEDULED_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Scheduled time\n

\n
\n
AVG_ABANDON_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue abandon time\n

\n
\n
AVG_ACTIVE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average active time\n

\n
\n
AVG_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average after contact work time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_AGENT_CONNECTING_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. For now, this metric only\n supports the following as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Average agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
AVG_AGENT_PAUSE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average agent pause time\n

\n
\n
AVG_CASE_RELATED_CONTACTS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average contacts per case\n

\n
\n
AVG_CASE_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average case resolution time\n

\n
\n
AVG_CONTACT_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average contact duration\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_CONVERSATION_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average conversation duration\n

\n
\n
AVG_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Average flow time\n

\n
\n
AVG_GREETING_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent greeting time\n

\n
\n
AVG_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression

\n

UI name: Average handle time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME_ALL_CONTACTS
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time all contacts\n

\n
\n
AVG_HOLDS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average holds\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction and customer hold time\n

\n
\n
AVG_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERRUPTIONS_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruptions\n

\n
\n
AVG_INTERRUPTION_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruption time\n

\n
\n
AVG_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average non-talk time\n

\n
\n
AVG_QUEUE_ANSWER_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue answer time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Average resolution time\n

\n
\n
AVG_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average talk time\n

\n
\n
AVG_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent talk time\n

\n
\n
AVG_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer talk time\n

\n
\n
CASES_CREATED
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases created\n

\n
\n
CONTACTS_CREATED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts created\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: API contacts handled\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts handled (connected to agent timestamp)\n

\n
\n
CONTACTS_HOLD_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts hold disconnect\n

\n
\n
CONTACTS_ON_HOLD_AGENT_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold agent disconnect\n

\n
\n
CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold customer disconnect\n

\n
\n
CONTACTS_PUT_ON_HOLD
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts put on hold\n

\n
\n
CONTACTS_TRANSFERRED_OUT_EXTERNAL
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out external\n

\n
\n
CONTACTS_TRANSFERRED_OUT_INTERNAL
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out internal\n

\n
\n
CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts queued\n

\n
\n
CONTACTS_QUEUED_BY_ENQUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts queued (enqueue timestamp)\n

\n
\n
CONTACTS_REMOVED_FROM_QUEUE_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts removed from queue in X seconds\n

\n
\n
CONTACTS_RESOLVED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts resolved in X\n

\n
\n
CONTACTS_TRANSFERRED_OUT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_TRANSFERRED_OUT_BY_AGENT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out by agent\n

\n
\n
CONTACTS_TRANSFERRED_OUT_FROM_QUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out queue\n

\n
\n
CURRENT_CASES
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Current cases\n

\n
\n
FLOWS_OUTCOME
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome\n

\n
\n
FLOWS_STARTED
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows started\n

\n
\n
MAX_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Maximum flow time\n

\n
\n
MAX_QUEUED_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Maximum queued time\n

\n
\n
MIN_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Minimum flow time\n

\n
\n
PERCENT_CASES_FIRST_CONTACT_RESOLVED
\n
\n

Unit: Percent

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved on first contact\n

\n
\n
PERCENT_CONTACTS_STEP_EXPIRED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
PERCENT_CONTACTS_STEP_JOINED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
PERCENT_FLOWS_OUTCOME
\n
\n

Unit: Percent

\n

Valid metric filter key: FLOWS_OUTCOME_TYPE\n

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome percentage.

\n \n

The FLOWS_OUTCOME_TYPE is not a valid grouping.

\n
\n
\n
PERCENT_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Non-talk\n time percent\n

\n
\n
PERCENT_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Talk time\n percent\n

\n
\n
PERCENT_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Agent\n talk time percent\n

\n
\n
PERCENT_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Customer talk time percent\n

\n
\n
REOPENED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases reopened\n

\n
\n
RESOLVED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved\n

\n
\n
SERVICE_LEVEL
\n
\n

You can include up to 20 SERVICE_LEVEL metrics in a request.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Service level X\n

\n
\n
STEP_CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
SUM_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: After\n contact work time\n

\n
\n
SUM_CONNECTING_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. This metric only supports the\n following filter keys as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
SUM_CONTACTS_ABANDONED
\n
\n

Unit: Count

\n

Metric filter:

\n
    \n
  • \n

    Valid values: API| Incoming | Outbound |\n Transfer | Callback | Queue_Transfer|\n Disconnect\n

    \n
  • \n
\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: Contact abandoned\n

\n
\n
SUM_CONTACTS_ABANDONED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts abandoned in X seconds\n

\n
\n
SUM_CONTACTS_ANSWERED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts answered in X seconds\n

\n
\n
SUM_CONTACT_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact flow time\n

\n
\n
SUM_CONTACT_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent on contact time\n

\n
\n
SUM_CONTACTS_DISCONNECTED
\n
\n

Valid metric filter key: DISCONNECT_REASON\n

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contact disconnected\n

\n
\n
SUM_ERROR_STATUS_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Error status time\n

\n
\n
SUM_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact handle time\n

\n
\n
SUM_HOLD_TIME
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Customer hold time\n

\n
\n
SUM_IDLE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent idle time\n

\n
\n
SUM_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Agent interaction and hold time\n

\n
\n
SUM_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent interaction time\n

\n
\n
SUM_NON_PRODUCTIVE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-Productive Time\n

\n
\n
SUM_ONLINE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Online time\n

\n
\n
SUM_RETRY_CALLBACK_ATTEMPTS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Callback attempts\n

\n
\n
", + "smithy.api#documentation": "

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The\n following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator\n Guide.

\n
\n
ABANDONMENT_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Abandonment rate\n

\n
\n
AGENT_ADHERENT_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherent time\n

\n
\n
AGENT_ANSWER_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent answer rate\n

\n
\n
AGENT_NON_ADHERENT_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-adherent time\n

\n
\n
AGENT_NON_RESPONSE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent\n non-response\n

\n
\n
AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

\n

UI name: Agent non-response without customer abandons\n

\n
\n
AGENT_OCCUPANCY
\n
\n

Unit: Percentage

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Occupancy\n

\n
\n
AGENT_SCHEDULE_ADHERENCE
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherence\n

\n
\n
AGENT_SCHEDULED_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Scheduled time\n

\n
\n
AVG_ABANDON_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue abandon time\n

\n
\n
AVG_ACTIVE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average active time\n

\n
\n
AVG_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average after contact work time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_AGENT_CONNECTING_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. For now, this metric only\n supports the following as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Average agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
AVG_AGENT_PAUSE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average agent pause time\n

\n
\n
AVG_CASE_RELATED_CONTACTS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average contacts per case\n

\n
\n
AVG_CASE_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average case resolution time\n

\n
\n
AVG_CONTACT_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average contact duration\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_CONVERSATION_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average conversation duration\n

\n
\n
AVG_DIALS_PER_MINUTE
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Campaign, Agent, Queue, Routing Profile

\n

UI name: Average dials per minute\n

\n
\n
AVG_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Average flow time\n

\n
\n
AVG_GREETING_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent greeting time\n

\n
\n
AVG_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression

\n

UI name: Average handle time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME_ALL_CONTACTS
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time all contacts\n

\n
\n
AVG_HOLDS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average holds\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction and customer hold time\n

\n
\n
AVG_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERRUPTIONS_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruptions\n

\n
\n
AVG_INTERRUPTION_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruption time\n

\n
\n
AVG_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average non-talk time\n

\n
\n
AVG_QUEUE_ANSWER_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue answer time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Average resolution time\n

\n
\n
AVG_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average talk time\n

\n
\n
AVG_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent talk time\n

\n
\n
AVG_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer talk time\n

\n
\n
AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Campaign

\n

UI name: Average wait time after customer connection\n

\n
\n
CAMPAIGN_CONTACTS_ABANDONED_AFTER_X
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Campaign, Agent

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter GT (for\n Greater than).

\n

UI name: Campaign contacts abandoned after X\n

\n
\n
CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Percent

\n

Valid groupings and filters: Campaign, Agent

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter GT (for\n Greater than).

\n

UI name: Campaign contacts abandoned after X rate\n

\n
\n
CASES_CREATED
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases created\n

\n
\n
CONTACTS_CREATED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts created\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: API contacts handled\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts handled (connected to agent timestamp)\n

\n
\n
CONTACTS_HOLD_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts hold disconnect\n

\n
\n
CONTACTS_ON_HOLD_AGENT_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold agent disconnect\n

\n
\n
CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold customer disconnect\n

\n
\n
CONTACTS_PUT_ON_HOLD
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts put on hold\n

\n
\n
CONTACTS_TRANSFERRED_OUT_EXTERNAL
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out external\n

\n
\n
CONTACTS_TRANSFERRED_OUT_INTERNAL
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out internal\n

\n
\n
CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts queued\n

\n
\n
CONTACTS_QUEUED_BY_ENQUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts queued (enqueue timestamp)\n

\n
\n
CONTACTS_REMOVED_FROM_QUEUE_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts removed from queue in X seconds\n

\n
\n
CONTACTS_RESOLVED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts resolved in X\n

\n
\n
CONTACTS_TRANSFERRED_OUT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_TRANSFERRED_OUT_BY_AGENT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out by agent\n

\n
\n
CONTACTS_TRANSFERRED_OUT_FROM_QUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out queue\n

\n
\n
CURRENT_CASES
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Current cases\n

\n
\n
DELIVERY_ATTEMPTS
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Count

\n

Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status,\n Disconnect Reason

\n

UI name: Delivery attempts\n

\n
\n
DELIVERY_ATTEMPT_DISPOSITION_RATE
\n
\n

This metric is available only for contacts analyzed by outbound campaigns analytics, and\n with the answering machine detection enabled.

\n

Unit: Percent

\n

Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason

\n \n

Answering Machine Detection Status and Disconnect Reason are valid filters but not valid\n groupings.

\n
\n

UI name: Delivery attempt disposition rate\n

\n
\n
FLOWS_OUTCOME
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome\n

\n
\n
FLOWS_STARTED
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows started\n

\n
\n
HUMAN_ANSWERED_CALLS
\n
\n

This metric is available only for contacts analyzed by outbound campaigns analytics, and\n with the answering machine detection enabled.

\n

Unit: Count

\n

Valid groupings and filters: Campaign, Agent

\n

UI name: Human answered\n

\n
\n
MAX_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Maximum flow time\n

\n
\n
MAX_QUEUED_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Maximum queued time\n

\n
\n
MIN_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Minimum flow time\n

\n
\n
PERCENT_CASES_FIRST_CONTACT_RESOLVED
\n
\n

Unit: Percent

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved on first contact\n

\n
\n
PERCENT_CONTACTS_STEP_EXPIRED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
PERCENT_CONTACTS_STEP_JOINED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
PERCENT_FLOWS_OUTCOME
\n
\n

Unit: Percent

\n

Valid metric filter key: FLOWS_OUTCOME_TYPE\n

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome percentage.

\n \n

The FLOWS_OUTCOME_TYPE is not a valid grouping.

\n
\n
\n
PERCENT_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Non-talk\n time percent\n

\n
\n
PERCENT_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Talk time\n percent\n

\n
\n
PERCENT_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Agent\n talk time percent\n

\n
\n
PERCENT_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Customer talk time percent\n

\n
\n
REOPENED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases reopened\n

\n
\n
RESOLVED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved\n

\n
\n
SERVICE_LEVEL
\n
\n

You can include up to 20 SERVICE_LEVEL metrics in a request.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Service level X\n

\n
\n
STEP_CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
SUM_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: After\n contact work time\n

\n
\n
SUM_CONNECTING_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. This metric only supports the\n following filter keys as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
SUM_CONTACTS_ABANDONED
\n
\n

Unit: Count

\n

Metric filter:

\n
    \n
  • \n

    Valid values: API| Incoming | Outbound |\n Transfer | Callback | Queue_Transfer|\n Disconnect\n

    \n
  • \n
\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: Contact abandoned\n

\n
\n
SUM_CONTACTS_ABANDONED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts abandoned in X seconds\n

\n
\n
SUM_CONTACTS_ANSWERED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts answered in X seconds\n

\n
\n
SUM_CONTACT_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact flow time\n

\n
\n
SUM_CONTACT_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent on contact time\n

\n
\n
SUM_CONTACTS_DISCONNECTED
\n
\n

Valid metric filter key: DISCONNECT_REASON\n

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contact disconnected\n

\n
\n
SUM_ERROR_STATUS_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Error status time\n

\n
\n
SUM_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact handle time\n

\n
\n
SUM_HOLD_TIME
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Customer hold time\n

\n
\n
SUM_IDLE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent idle time\n

\n
\n
SUM_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Agent interaction and hold time\n

\n
\n
SUM_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent interaction time\n

\n
\n
SUM_NON_PRODUCTIVE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-Productive Time\n

\n
\n
SUM_ONLINE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Online time\n

\n
\n
SUM_RETRY_CALLBACK_ATTEMPTS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Callback attempts\n

\n
\n
", "smithy.api#required": {} } }, @@ -21613,7 +21613,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists predefined attributes for the specified Amazon Connect instance.

", + "smithy.api#documentation": "

Lists predefined attributes for the specified Amazon Connect instance. Predefined\n attributes are attributes in an Amazon Connect instance that can be used to route\n contacts to an agent or pools of agents within a queue. For more information, see Create\n predefined attributes for routing contacts to agents.

", "smithy.api#http": { "method": "GET", "uri": "/predefined-attributes/{InstanceId}", @@ -22729,7 +22729,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the permissions granted to a security profile.

", + "smithy.api#documentation": "

Lists the permissions granted to a security profile.

\n

For information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.

", "smithy.api#http": { "method": "GET", "uri": "/security-profiles-permissions/{InstanceId}/{SecurityProfileId}", @@ -22839,7 +22839,7 @@ } ], "traits": { - "smithy.api#documentation": "

Provides summary information about the security profiles for the specified Amazon Connect instance.

\n

For more information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide.

", + "smithy.api#documentation": "

Provides summary information about the security profiles for the specified Amazon Connect instance.

\n

For more information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.

", "smithy.api#http": { "method": "GET", "uri": "/security-profiles-summary/{InstanceId}", @@ -23845,12 +23845,12 @@ "AgentsCriteria": { "target": "com.amazonaws.connect#AgentsCriteria", "traits": { - "smithy.api#documentation": "

An object to define AgentIds.

" + "smithy.api#documentation": "

An object to define agentIds.

" } } }, "traits": { - "smithy.api#documentation": "

An object to define AgentsCriteria.

" + "smithy.api#documentation": "

An object to define AgentsCriteria.

" } }, "com.amazonaws.connect#MaxResult10": { @@ -28279,6 +28279,67 @@ } } }, + "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryContent": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1270 + } + } + }, + "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryFailureCode": { + "type": "enum", + "members": { + "QUOTA_EXCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUOTA_EXCEEDED" + } + }, + "INSUFFICIENT_CONVERSATION_CONTENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INSUFFICIENT_CONVERSATION_CONTENT" + } + }, + "FAILED_SAFETY_GUIDELINES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED_SAFETY_GUIDELINES" + } + }, + "INVALID_ANALYSIS_CONFIGURATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_ANALYSIS_CONFIGURATION" + } + }, + "INTERNAL_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNAL_ERROR" + } + } + } + }, + "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryStatus": { + "type": "enum", + "members": { + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + } + } + }, "com.amazonaws.connect#RealTimeContactAnalysisSegmentAttachments": { "type": "structure", "members": { @@ -28405,6 +28466,33 @@ "smithy.api#documentation": "

Segment type containing a list of detected issues.

" } }, + "com.amazonaws.connect#RealTimeContactAnalysisSegmentPostContactSummary": { + "type": "structure", + "members": { + "Content": { + "target": "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryContent", + "traits": { + "smithy.api#documentation": "

The content of the summary.

" + } + }, + "Status": { + "target": "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryStatus", + "traits": { + "smithy.api#documentation": "

Whether the summary was successfully COMPLETED or FAILED to be generated.

", + "smithy.api#required": {} + } + }, + "FailureCode": { + "target": "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryFailureCode", + "traits": { + "smithy.api#documentation": "

If the summary failed to be generated, one of the following failure codes occurs:

\n
    \n
  • \n

    \n QUOTA_EXCEEDED: The number of concurrent analytics jobs reached your service\n quota.

    \n
  • \n
  • \n

    \n INSUFFICIENT_CONVERSATION_CONTENT: The conversation needs to have at least\n one turn from both the participants in order to generate the summary.

    \n
  • \n
  • \n

    \n FAILED_SAFETY_GUIDELINES: The generated summary cannot be provided because it\n failed to meet system safety guidelines.

    \n
  • \n
  • \n

    \n INVALID_ANALYSIS_CONFIGURATION: This code occurs when, for example, you're\n using a language that isn't supported by generative AI-powered post-contact summaries.\n

    \n
  • \n
  • \n

    \n INTERNAL_ERROR: Internal system error.

    \n
  • \n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the post-contact summary for a real-time contact segment.

" + } + }, "com.amazonaws.connect#RealTimeContactAnalysisSegmentTranscript": { "type": "structure", "members": { @@ -28504,6 +28592,12 @@ "traits": { "smithy.api#enumValue": "Attachments" } + }, + "PostContactSummary": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PostContactSummary" + } } } }, @@ -28515,7 +28609,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 5 + "max": 6 } } }, @@ -28708,6 +28802,12 @@ "traits": { "smithy.api#documentation": "

The analyzed attachments.

" } + }, + "PostContactSummary": { + "target": "com.amazonaws.connect#RealTimeContactAnalysisSegmentPostContactSummary", + "traits": { + "smithy.api#documentation": "

Information about the post-contact summary.

" + } } }, "traits": { @@ -29437,6 +29537,60 @@ "smithy.api#documentation": "

Latest routing criteria on the contact.

" } }, + "com.amazonaws.connect#RoutingCriteriaInput": { + "type": "structure", + "members": { + "Steps": { + "target": "com.amazonaws.connect#RoutingCriteriaInputSteps", + "traits": { + "smithy.api#documentation": "

When Amazon Connect does not find an available agent meeting the requirements in a step for\u2028 \n a given step duration, the routing criteria will move on to the next step sequentially until a\u2028 \n join is completed with an agent. When all steps are exhausted, the contact will be offered to any agent \n in the queue.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An object to define the RoutingCriteria.

" + } + }, + "com.amazonaws.connect#RoutingCriteriaInputStep": { + "type": "structure", + "members": { + "Expiry": { + "target": "com.amazonaws.connect#RoutingCriteriaInputStepExpiry", + "traits": { + "smithy.api#documentation": "

An object to specify the expiration of a routing step.

" + } + }, + "Expression": { + "target": "com.amazonaws.connect#Expression", + "traits": { + "smithy.api#documentation": "

A tagged union to specify expression for a routing step.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Step defines the list of agents to be routed or route based on the agent requirements such as ProficiencyLevel, \n Name, or Value.

" + } + }, + "com.amazonaws.connect#RoutingCriteriaInputStepExpiry": { + "type": "structure", + "members": { + "DurationInSeconds": { + "target": "com.amazonaws.connect#DurationInSeconds", + "traits": { + "smithy.api#documentation": "

The number of seconds that the contact will be routed only to agents matching this routing\u2028 step, if expiry \n was configured for this routing step.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specify whether this routing criteria step should apply for only a limited amount of time,\u2028 or if it should \n never expire.

" + } + }, + "com.amazonaws.connect#RoutingCriteriaInputSteps": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#RoutingCriteriaInputStep" + } + }, "com.amazonaws.connect#RoutingCriteriaStepStatus": { "type": "enum", "members": { @@ -30973,7 +31127,7 @@ } ], "traits": { - "smithy.api#documentation": "

Predefined attributes that meet certain criteria.

", + "smithy.api#documentation": "

Searches predefined attributes that meet certain criteria. Predefined\n attributes are attributes in an Amazon Connect instance that can be used to route\n contacts to an agent or pools of agents within a queue. For more information, see Create\n predefined attributes for routing contacts to agents.

", "smithy.api#http": { "method": "POST", "uri": "/search-predefined-attributes", @@ -31594,7 +31748,7 @@ } ], "traits": { - "smithy.api#documentation": "

Searches security profiles in an Amazon Connect instance, with optional\n filtering.

", + "smithy.api#documentation": "

Searches security profiles in an Amazon Connect instance, with optional\n filtering.

\n

For information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.

", "smithy.api#http": { "method": "POST", "uri": "/search-security-profiles", @@ -35162,7 +35316,7 @@ "Comparison": { "target": "com.amazonaws.connect#ResourceArnOrId", "traits": { - "smithy.api#documentation": "

The type of comparison. Only \"less than\" (LT) comparisons are supported.

" + "smithy.api#documentation": "

The type of comparison. Only \"less than\" (LT) and \"greater than\" (GT) comparisons are\n supported.

" } }, "ThresholdValue": { @@ -36732,6 +36886,12 @@ "traits": { "smithy.api#documentation": "

Priority of the contact in the queue. The default priority for new contacts is 5. You can\n raise the priority of a contact compared to other contacts in the queue by assigning them a\n higher priority, such as 1 or 2.

" } + }, + "RoutingCriteria": { + "target": "com.amazonaws.connect#RoutingCriteriaInput", + "traits": { + "smithy.api#documentation": "

Updates the routing criteria on the contact. These properties can be used to change how a\u2028\n contact is routed within the queue.

" + } } }, "traits": { @@ -37471,7 +37631,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a predefined attribute for the specified Amazon Connect instance.

", + "smithy.api#documentation": "

Updates a predefined attribute for the specified Amazon Connect instance. Predefined\n attributes are attributes in an Amazon Connect instance that can be used to route\n contacts to an agent or pools of agents within a queue. For more information, see Create\n predefined attributes for routing contacts to agents.

", "smithy.api#http": { "method": "POST", "uri": "/predefined-attributes/{InstanceId}/{Name}", @@ -38530,7 +38690,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a security profile.

", + "smithy.api#documentation": "

Updates a security profile.

\n

For information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.

", "smithy.api#http": { "method": "POST", "uri": "/security-profiles/{InstanceId}/{SecurityProfileId}", diff --git a/models/controlcatalog.json b/models/controlcatalog.json index 1f9a73a503..32b25e7876 100644 --- a/models/controlcatalog.json +++ b/models/controlcatalog.json @@ -163,6 +163,39 @@ "target": "com.amazonaws.controlcatalog#CommonControlSummary" } }, + "com.amazonaws.controlcatalog#ControlArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 34, + "max": 2048 + }, + "smithy.api#pattern": "^arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_\\-]+$" + } + }, + "com.amazonaws.controlcatalog#ControlBehavior": { + "type": "enum", + "members": { + "PREVENTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PREVENTIVE" + } + }, + "PROACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROACTIVE" + } + }, + "DETECTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DETECTIVE" + } + } + } + }, "com.amazonaws.controlcatalog#ControlCatalog": { "type": "service", "version": "2018-05-10", @@ -170,6 +203,9 @@ { "target": "com.amazonaws.controlcatalog#CommonControlResource" }, + { + "target": "com.amazonaws.controlcatalog#ControlResource" + }, { "target": "com.amazonaws.controlcatalog#DomainResource" }, @@ -881,6 +917,88 @@ } } }, + "com.amazonaws.controlcatalog#ControlResource": { + "type": "resource", + "identifiers": { + "ControlArn": { + "target": "com.amazonaws.controlcatalog#ControlArn" + } + }, + "read": { + "target": "com.amazonaws.controlcatalog#GetControl" + }, + "list": { + "target": "com.amazonaws.controlcatalog#ListControls" + }, + "traits": { + "aws.api#arn": { + "template": "{ControlArn}", + "absolute": true + }, + "aws.iam#disableConditionKeyInference": {}, + "aws.iam#iamResource": { + "name": "control" + } + } + }, + "com.amazonaws.controlcatalog#ControlScope": { + "type": "enum", + "members": { + "GLOBAL": { + 
"target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GLOBAL" + } + }, + "REGIONAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REGIONAL" + } + } + } + }, + "com.amazonaws.controlcatalog#ControlSummary": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.controlcatalog#ControlArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the control.

", + "smithy.api#required": {} + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The display name of the control.

", + "smithy.api#required": {} + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A description of the control, as it may appear in the console. Describes the functionality of the control.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Overview of information about a control.

" + } + }, + "com.amazonaws.controlcatalog#Controls": { + "type": "list", + "member": { + "target": "com.amazonaws.controlcatalog#ControlSummary" + } + }, + "com.amazonaws.controlcatalog#DeployableRegions": { + "type": "list", + "member": { + "target": "com.amazonaws.controlcatalog#RegionCode" + } + }, "com.amazonaws.controlcatalog#DomainArn": { "type": "string", "traits": { @@ -982,6 +1100,98 @@ "target": "com.amazonaws.controlcatalog#DomainSummary" } }, + "com.amazonaws.controlcatalog#GetControl": { + "type": "operation", + "input": { + "target": "com.amazonaws.controlcatalog#GetControlRequest" + }, + "output": { + "target": "com.amazonaws.controlcatalog#GetControlResponse" + }, + "errors": [ + { + "target": "com.amazonaws.controlcatalog#AccessDeniedException" + }, + { + "target": "com.amazonaws.controlcatalog#InternalServerException" + }, + { + "target": "com.amazonaws.controlcatalog#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.controlcatalog#ThrottlingException" + }, + { + "target": "com.amazonaws.controlcatalog#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns details about a specific control, most notably a list of Amazon Web Services Regions where this control is supported. Input a value for the ControlArn parameter, in ARN form. GetControl accepts controltower or controlcatalog control ARNs as input. Returns a controlcatalog ARN format.

\n

In the API response, controls that have the value GLOBAL in the Scope field do not show the DeployableRegions field, because it does not apply. Controls that have the value REGIONAL in the Scope field return a value for the DeployableRegions field, as shown in the example.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/get-control" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.controlcatalog#GetControlRequest": { + "type": "structure", + "members": { + "ControlArn": { + "target": "com.amazonaws.controlcatalog#ControlArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the control. It has one of the following formats:

\n

\n Global format\n

\n

\n arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID}\n

\n

\n Or Regional format\n

\n

\n arn:{PARTITION}:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID}\n

\n

Here is a more general pattern that covers Amazon Web Services Control Tower and Control Catalog ARNs:

\n

\n ^arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_\\\\-]+$\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.controlcatalog#GetControlResponse": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.controlcatalog#ControlArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the control.

", + "smithy.api#required": {} + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The display name of the control.

", + "smithy.api#required": {} + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A description of what the control does.

", + "smithy.api#required": {} + } + }, + "Behavior": { + "target": "com.amazonaws.controlcatalog#ControlBehavior", + "traits": { + "smithy.api#documentation": "

A term that identifies the control's functional behavior. One of Preventive, Detective, Proactive\n

", + "smithy.api#required": {} + } + }, + "RegionConfiguration": { + "target": "com.amazonaws.controlcatalog#RegionConfiguration", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.controlcatalog#InternalServerException": { "type": "structure", "members": { @@ -1083,6 +1293,87 @@ "smithy.api#output": {} } }, + "com.amazonaws.controlcatalog#ListControls": { + "type": "operation", + "input": { + "target": "com.amazonaws.controlcatalog#ListControlsRequest" + }, + "output": { + "target": "com.amazonaws.controlcatalog#ListControlsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.controlcatalog#AccessDeniedException" + }, + { + "target": "com.amazonaws.controlcatalog#InternalServerException" + }, + { + "target": "com.amazonaws.controlcatalog#ThrottlingException" + }, + { + "target": "com.amazonaws.controlcatalog#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a paginated list of all available controls in the Amazon Web Services Control Catalog library. Allows you to discover available controls. The list of controls is given as structures of type controlSummary. The ARN is returned in the global controlcatalog format, as shown in the examples.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/list-controls" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Controls" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.controlcatalog#ListControlsRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.controlcatalog#PaginationToken", + "traits": { + "smithy.api#documentation": "

The pagination token that's used to fetch the next set of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "MaxResults": { + "target": "com.amazonaws.controlcatalog#MaxListControlsResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results on a page or for an API request call.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.controlcatalog#ListControlsResponse": { + "type": "structure", + "members": { + "Controls": { + "target": "com.amazonaws.controlcatalog#Controls", + "traits": { + "smithy.api#documentation": "

Returns a list of controls, given as structures of type controlSummary.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.controlcatalog#PaginationToken", + "traits": { + "smithy.api#documentation": "

The pagination token that's used to fetch the next set of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.controlcatalog#ListDomains": { "type": "operation", "input": { @@ -1260,6 +1551,15 @@ } } }, + "com.amazonaws.controlcatalog#MaxListControlsResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, "com.amazonaws.controlcatalog#MaxListDomainsResults": { "type": "integer", "traits": { @@ -1408,6 +1708,46 @@ } } }, + "com.amazonaws.controlcatalog#RegionCode": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9-]{1,128}$" + } + }, + "com.amazonaws.controlcatalog#RegionConfiguration": { + "type": "structure", + "members": { + "Scope": { + "target": "com.amazonaws.controlcatalog#ControlScope", + "traits": { + "smithy.api#documentation": "

The coverage of the control, if deployed. Scope is an enumerated type, with value Regional, or Global. A control with Global scope is effective in all Amazon Web Services Regions, regardless of the Region from which it is enabled, or to which it is deployed. A control implemented by an SCP is usually Global in scope. A control with Regional scope has operations that are restricted specifically to the Region from which it is enabled and to which it is deployed. Controls implemented by Config rules and CloudFormation hooks usually are Regional in scope. Security Hub controls usually are Regional in scope.

", + "smithy.api#required": {} + } + }, + "DeployableRegions": { + "target": "com.amazonaws.controlcatalog#DeployableRegions", + "traits": { + "smithy.api#documentation": "

Regions in which the control is available to be deployed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Returns information about the control, including the scope of the control, if enabled, and the Regions in which the control currently is available for deployment.

\n

If you are applying controls through an Amazon Web Services Control Tower landing zone environment, remember that the values returned in the RegionConfiguration API operation are not related to the governed Regions in your landing zone. For example, if you are governing Regions A, B, and C while the control is available in Regions A, B, C, and D, you'd see a response with DeployableRegions of A, B, C, and D for a control with REGIONAL scope, even though you may not intend to deploy the control in Region D, because you do not govern it through your landing zone.

" + } + }, + "com.amazonaws.controlcatalog#ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

The requested resource does not exist.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, "com.amazonaws.controlcatalog#ThrottlingException": { "type": "structure", "members": { diff --git a/models/controltower.json b/models/controltower.json index 996779814b..b70920fe1f 100644 --- a/models/controltower.json +++ b/models/controltower.json @@ -67,7 +67,7 @@ "x-amzn-trace-id" ] }, - "smithy.api#documentation": "

Amazon Web Services Control Tower offers application programming interface (API) operations that support programmatic interaction with these types of resources:

\n \n

For more information about these types of resources, see the \n Amazon Web Services Control Tower User Guide\n .

\n

\n About control APIs\n

\n

These interfaces allow you to apply the Amazon Web Services library of pre-defined\n controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms \"control\" and \"guardrail\" are synonyms.

\n

To call these APIs, you'll need to know:

\n
    \n
  • \n

    the controlIdentifier for the control--or guardrail--you are targeting.

    \n
  • \n
  • \n

    the ARN associated with the target organizational unit (OU), which we call the targetIdentifier.

    \n
  • \n
  • \n

    the ARN associated with a resource that you wish to tag or untag.

    \n
  • \n
\n

\n To get the controlIdentifier for your Amazon Web Services Control Tower\n control:\n

\n

The controlIdentifier is an ARN that is specified for each\n control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation.

\n

The controlIdentifier is unique in each Amazon Web Services Region for each control. You can\n find the controlIdentifier for each Region and control in the Tables of control metadata or the Control availability by Region tables in the Amazon Web Services Control Tower Controls Reference Guide.

\n

A quick-reference list of control identifiers for the Amazon Web Services Control Tower legacy Strongly recommended and\n Elective controls is given in Resource identifiers for\n APIs and controls in the \n Amazon Web Services Control Tower Controls Reference Guide\n . Remember that Mandatory controls cannot be added or removed.

\n \n

\n ARN format:\n arn:aws:controltower:{REGION}::control/{CONTROL_NAME}\n

\n

\n Example:\n

\n

\n arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED\n

\n
\n

\n To get the targetIdentifier:\n

\n

The targetIdentifier is the ARN for an OU.

\n

In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU.

\n \n

\n OU ARN format:\n

\n

\n arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}\n

\n
\n

\n About landing zone APIs\n

\n

You can configure and launch an Amazon Web Services Control Tower landing zone with APIs. For an introduction and steps, see Getting started with Amazon Web Services Control Tower using APIs.

\n

For an overview of landing zone API operations, see Amazon Web Services Control Tower supports landing zone APIs. The individual API operations for landing zones are detailed in this document, the API reference manual, in the \"Actions\" section.

\n

\n About baseline APIs\n

\n

You can apply the AWSControlTowerBaseline baseline to an organizational unit (OU) as a way to register the OU with Amazon Web Services Control Tower, programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration with baselines.

\n

You can call the baseline API operations to view the baselines that Amazon Web Services Control Tower enables for your landing zone, on your behalf, when setting up the landing zone. These baselines are read-only baselines.

\n

The individual API operations for baselines are detailed in this document, the API reference manual, in the \"Actions\" section. For usage examples, see Baseline API input and output examples with CLI.

\n

\n Details and examples\n

\n \n

To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower\n

\n

\n Recording API Requests\n

\n

Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your\n Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by\n CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made\n the request and when, and so on. For more about Amazon Web Services Control Tower and its support for\n CloudTrail, see Logging Amazon Web Services Control Tower\n Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about\n CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User\n Guide.

", + "smithy.api#documentation": "

Amazon Web Services Control Tower offers application programming interface (API)\n operations that support programmatic interaction with these types of resources:

\n \n

For more information about these types of resources, see the \n Amazon Web Services Control Tower User Guide\n .

\n

\n About control APIs\n

\n

These interfaces allow you to apply the Amazon Web Services library of pre-defined\n controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms \"control\" and \"guardrail\" are synonyms.

\n

To call these APIs, you'll need to know:

\n
    \n
  • \n

    the controlIdentifier for the control--or guardrail--you are\n targeting.

    \n
  • \n
  • \n

    the ARN associated with the target organizational unit (OU), which we call the\n targetIdentifier.

    \n
  • \n
  • \n

    the ARN associated with a resource that you wish to tag or untag.

    \n
  • \n
\n

\n To get the controlIdentifier for your Amazon Web Services Control Tower control:\n

\n

The controlIdentifier is an ARN that is specified for each control. You can\n view the controlIdentifier in the console on the Control\n details page, as well as in the documentation.

\n

\n About identifiers for Amazon Web Services Control Tower\n

\n

The Amazon Web Services Control Tower controlIdentifier is unique in each Amazon Web Services Region for each\n control. You can find the controlIdentifier for each Region and control in the\n Tables of control metadata or the Control\n availability by Region tables in the Amazon Web Services Control Tower\n Controls Reference Guide.

\n

A quick-reference list of control identifiers for the Amazon Web Services Control Tower\n legacy Strongly recommended and Elective controls\n is given in Resource\n identifiers for APIs and controls in the \n Amazon Web Services Control Tower Controls Reference Guide\n . Remember\n that Mandatory controls cannot be added or removed.

\n \n

\n Some controls have two identifiers\n

\n
    \n
  • \n

    \n ARN format for Amazon Web Services Control\n Tower:\n arn:aws:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID}\n

    \n

    \n Example:\n

    \n

    \n arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED\n

    \n
  • \n
  • \n

    \n ARN format for Amazon Web Services Control\n Catalog:\n arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID}\n

    \n
  • \n
\n

You can find the {CONTROL_CATALOG_OPAQUE_ID} in the \n Amazon Web Services Control Tower Controls Reference\n Guide\n , or in the Amazon Web Services Control Tower console, on the\n Control details page.

\n

The Amazon Web Services Control Tower APIs for enabled controls, such as\n GetEnabledControl and ListEnabledControls always return an\n ARN of the same type given when the control was enabled.

\n
\n

\n To get the targetIdentifier:\n

\n

The targetIdentifier is the ARN for an OU.

\n

In the Amazon Web Services Organizations console, you can find the ARN for the OU on the\n Organizational unit details page associated with that\n OU.

\n \n

\n OU ARN format:\n

\n

\n arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}\n

\n
\n

\n About landing zone APIs\n

\n

You can configure and launch an Amazon Web Services Control Tower landing zone with APIs.\n For an introduction and steps, see Getting started with\n Amazon Web Services Control Tower using APIs.

\n

For an overview of landing zone API operations, see \n Amazon Web Services Control Tower supports landing zone APIs. The individual API\n operations for landing zones are detailed in this document, the API reference\n manual, in the \"Actions\" section.

\n

\n About baseline APIs\n

\n

You can apply the AWSControlTowerBaseline baseline to an organizational\n unit (OU) as a way to register the OU with Amazon Web Services Control Tower,\n programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration\n with baselines.

\n

You can call the baseline API operations to view the baselines that Amazon Web Services\n Control Tower enables for your landing zone, on your behalf, when setting up the landing\n zone. These baselines are read-only baselines.

\n

The individual API operations for baselines are detailed in this document, the API\n reference manual, in the \"Actions\" section. For usage examples, see Baseline API input and output examples with CLI.

\n

\n About Amazon Web Services Control Catalog identifiers\n

\n
    \n
  • \n

    The EnableControl and DisableControl API operations can\n be called by specifying either the Amazon Web Services Control Tower identifer or the\n Amazon Web Services Control Catalog identifier. The API response returns the same\n type of identifier that you specified when calling the API.

    \n
  • \n
  • \n

    If you use an Amazon Web Services Control Tower identifier to call the\n EnableControl API, and then call EnableControl again\n with an Amazon Web Services Control Catalog identifier, Amazon Web Services Control\n Tower returns an error message stating that the control is already enabled. Similar\n behavior applies to the DisableControl API operation.

    \n
  • \n
  • \n

    Mandatory controls and the landing-zone-level Region deny control have Amazon Web Services Control Tower identifiers only.

    \n
  • \n
\n

\n Details and examples\n

\n \n

To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower\n

\n

\n Recording API Requests\n

\n

Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that\n records Amazon Web Services API calls for your Amazon Web Services account and delivers log\n files to an Amazon S3 bucket. By using information collected by CloudTrail, you can\n determine which requests the Amazon Web Services Control Tower service received, who made\n the request and when, and so on. For more about Amazon Web Services Control Tower and its\n support for CloudTrail, see Logging Amazon Web Services Control Tower Actions with Amazon Web Services CloudTrail in the\n Amazon Web Services Control Tower User Guide. To learn more about CloudTrail, including\n how to turn it on and find your log files, see the Amazon Web Services CloudTrail User\n Guide.

", "smithy.api#title": "AWS Control Tower", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/cost-optimization-hub.json b/models/cost-optimization-hub.json index 08cdf093d0..52e9b667e5 100644 --- a/models/cost-optimization-hub.json +++ b/models/cost-optimization-hub.json @@ -1943,7 +1943,7 @@ "includeMemberAccounts": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

The enrollment status of all member accounts in the organization if the account is the\n management account.

" + "smithy.api#documentation": "

The enrollment status of all member accounts in the organization if the account is the\n management account or delegated administrator.

" } }, "nextToken": { @@ -2006,13 +2006,22 @@ "maxResults": { "target": "com.amazonaws.costoptimizationhub#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of recommendations that are returned for the request.

", + "smithy.api#documentation": "

The maximum number of recommendations to be returned for the request.

", "smithy.api#range": { "min": 0, "max": 1000 } } }, + "metrics": { + "target": "com.amazonaws.costoptimizationhub#SummaryMetricsList", + "traits": { + "smithy.api#documentation": "

Additional metrics to be returned for the request. The only valid value is\n savingsPercentage.

", + "smithy.api#tags": [ + "delegatedAdmin" + ] + } + }, "nextToken": { "target": "smithy.api#String", "traits": { @@ -2036,7 +2045,7 @@ "items": { "target": "com.amazonaws.costoptimizationhub#RecommendationSummariesList", "traits": { - "smithy.api#documentation": "

List of all savings recommendations.

" + "smithy.api#documentation": "

A list of all savings recommendations.

" } }, "groupBy": { @@ -2051,6 +2060,15 @@ "smithy.api#documentation": "

The currency code used for the recommendation.

" } }, + "metrics": { + "target": "com.amazonaws.costoptimizationhub#SummaryMetricsResult", + "traits": { + "smithy.api#documentation": "

The results or descriptions for the additional metrics, based on whether the metrics were\n or were not requested.

", + "smithy.api#tags": [ + "delegatedAdmin" + ] + } + }, "nextToken": { "target": "smithy.api#String", "traits": { @@ -3353,6 +3371,43 @@ "smithy.api#documentation": "

The storage configuration used for recommendations.

" } }, + "com.amazonaws.costoptimizationhub#SummaryMetrics": { + "type": "enum", + "members": { + "SAVINGS_PERCENTAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SavingsPercentage" + } + } + } + }, + "com.amazonaws.costoptimizationhub#SummaryMetricsList": { + "type": "list", + "member": { + "target": "com.amazonaws.costoptimizationhub#SummaryMetrics" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.costoptimizationhub#SummaryMetricsResult": { + "type": "structure", + "members": { + "savingsPercentage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The savings percentage based on your Amazon Web Services spend over the past 30\n days.

\n \n

Savings percentage is only supported when filtering by Region, account ID, or\n tags.

\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

The results or descriptions for the additional metrics, based on whether the metrics were\n or were not requested.

" + } + }, "com.amazonaws.costoptimizationhub#Tag": { "type": "structure", "members": { @@ -3421,7 +3476,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the enrollment (opt in and opt out) status of an account to the Cost Optimization\n Hub service.

\n

If the account is a management account of an organization, this action can also be used to\n enroll member accounts of the organization.

\n

You must have the appropriate permissions to opt in to Cost Optimization Hub and to view\n its recommendations. When you opt in, Cost Optimization Hub automatically creates a\n service-linked role in your account to access its data.

" + "smithy.api#documentation": "

Updates the enrollment (opt in and opt out) status of an account to the Cost Optimization\n Hub service.

\n

If the account is a management account or delegated administrator of an organization, this\n action can also be used to enroll member accounts of the organization.

\n

You must have the appropriate permissions to opt in to Cost Optimization Hub and to view\n its recommendations. When you opt in, Cost Optimization Hub automatically creates a\n service-linked role in your account to access its data.

" } }, "com.amazonaws.costoptimizationhub#UpdateEnrollmentStatusRequest": { @@ -3437,7 +3492,7 @@ "includeMemberAccounts": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether to enroll member accounts of the organization if the account is the\n management account.

" + "smithy.api#documentation": "

Indicates whether to enroll member accounts of the organization if the account is the\n management account or delegated administrator.

" } } }, diff --git a/models/datazone.json b/models/datazone.json index 42d9639333..45659cbcf9 100644 --- a/models/datazone.json +++ b/models/datazone.json @@ -483,6 +483,104 @@ } ] }, + "com.amazonaws.datazone#AssetFilterConfiguration": { + "type": "union", + "members": { + "columnConfiguration": { + "target": "com.amazonaws.datazone#ColumnFilterConfiguration", + "traits": { + "smithy.api#documentation": "

The column configuration of the asset filter.

" + } + }, + "rowConfiguration": { + "target": "com.amazonaws.datazone#RowFilterConfiguration", + "traits": { + "smithy.api#documentation": "

The row configuration of the asset filter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration details of the asset filter.

" + } + }, + "com.amazonaws.datazone#AssetFilterSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "

The ID of the asset filter.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain where the asset filter lives.

", + "smithy.api#required": {} + } + }, + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The ID of the data asset.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FilterName", + "traits": { + "smithy.api#documentation": "

The name of the asset filter.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the asset filter.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#FilterStatus", + "traits": { + "smithy.api#documentation": "

The status of the asset filter.

" + } + }, + "effectiveColumnNames": { + "target": "com.amazonaws.datazone#ColumnNameList", + "traits": { + "smithy.api#documentation": "

The effective column names of the asset filter.

" + } + }, + "effectiveRowFilter": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The effective row filter of the asset filter.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp at which the asset filter was created.

" + } + }, + "errorMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The error message that is displayed if the action does not succeed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The summary of the asset filter.

" + } + }, + "com.amazonaws.datazone#AssetFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#AssetFilterSummary" + } + }, "com.amazonaws.datazone#AssetId": { "type": "string", "traits": { @@ -495,6 +593,38 @@ "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" } }, + "com.amazonaws.datazone#AssetInDataProductListingItem": { + "type": "structure", + "members": { + "entityId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The entity ID of the listing of the asset in a data product.

" + } + }, + "entityRevision": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The entity revision of the listing of the asset in a data product.

" + } + }, + "entityType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The entity type of the listing of the asset in a data product.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The listing of the asset in a data product.

" + } + }, + "com.amazonaws.datazone#AssetInDataProductListingItems": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#AssetInDataProductListingItem" + } + }, "com.amazonaws.datazone#AssetItem": { "type": "structure", "members": { @@ -1462,6 +1592,26 @@ "smithy.api#documentation": "

Part of the provisioning properties of the environment blueprint.

" } }, + "com.amazonaws.datazone#ColumnFilterConfiguration": { + "type": "structure", + "members": { + "includedColumnNames": { + "target": "com.amazonaws.datazone#ColumnNameList", + "traits": { + "smithy.api#documentation": "

Specifies whether to include column names.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The column configuration of the asset filter.

" + } + }, + "com.amazonaws.datazone#ColumnNameList": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, "com.amazonaws.datazone#ConfigurableActionParameter": { "type": "structure", "members": { @@ -1590,6 +1740,177 @@ "smithy.api#idempotent": {} } }, + "com.amazonaws.datazone#CreateAssetFilter": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateAssetFilterInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateAssetFilterOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a data asset filter.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#CreateAssetFilterInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain in which you want to create an asset filter.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "assetIdentifier": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The ID of the data asset.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FilterName", + "traits": { + "smithy.api#documentation": "

The name of the asset filter.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the asset filter.

" + } + }, + "configuration": { + "target": "com.amazonaws.datazone#AssetFilterConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the asset filter.

", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateAssetFilterOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "

The ID of the asset filter.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain where the asset filter is created.

", + "smithy.api#required": {} + } + }, + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The ID of the asset.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FilterName", + "traits": { + "smithy.api#documentation": "

The name of the asset filter.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the asset filter.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#FilterStatus", + "traits": { + "smithy.api#documentation": "

The status of the asset filter.

" + } + }, + "configuration": { + "target": "com.amazonaws.datazone#AssetFilterConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the asset filter.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp at which the asset filter was created.

" + } + }, + "errorMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The error message that is displayed if the asset filter is not created\n successfully.

" + } + }, + "effectiveColumnNames": { + "target": "com.amazonaws.datazone#ColumnNameList", + "traits": { + "smithy.api#documentation": "

The column names in the asset filter.

" + } + }, + "effectiveRowFilter": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The row filter in the asset filter.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#CreateAssetInput": { "type": "structure", "members": { @@ -2207,13 +2528,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateDataSource": { + "com.amazonaws.datazone#CreateDataProduct": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateDataSourceInput" + "target": "com.amazonaws.datazone#CreateDataProductInput" }, "output": { - "target": "com.amazonaws.datazone#CreateDataSourceOutput" + "target": "com.amazonaws.datazone#CreateDataProductOutput" }, "errors": [ { @@ -2239,99 +2560,66 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Amazon DataZone data source.

", + "smithy.api#documentation": "

Creates a data product.

", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/data-sources" + "uri": "/v2/domains/{domainIdentifier}/data-products" }, "smithy.api#idempotent": {} } }, - "com.amazonaws.datazone#CreateDataSourceInput": { + "com.amazonaws.datazone#CreateDataProductInput": { "type": "structure", "members": { - "name": { - "target": "com.amazonaws.datazone#Name", - "traits": { - "smithy.api#documentation": "

The name of the data source.

", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "

The description of the data source.

" - } - }, "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain where the data source is created.

", + "smithy.api#documentation": "

The ID of the domain where the data product is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "projectIdentifier": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "

The identifier of the Amazon DataZone project in which you want to add this data\n source.

", - "smithy.api#required": {} - } - }, - "environmentIdentifier": { - "target": "smithy.api#String", + "name": { + "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "

The unique identifier of the Amazon DataZone environment to which the data source publishes\n assets.

", + "smithy.api#documentation": "

The name of the data product.

", "smithy.api#required": {} } }, - "type": { - "target": "com.amazonaws.datazone#DataSourceType", + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The type of the data source.

", + "smithy.api#documentation": "

The ID of the owning project of the data product.

", "smithy.api#required": {} } }, - "configuration": { - "target": "com.amazonaws.datazone#DataSourceConfigurationInput", - "traits": { - "smithy.api#documentation": "

Specifies the configuration of the data source. It can be set to either\n glueRunConfiguration or redshiftRunConfiguration.

", - "smithy.api#notProperty": {} - } - }, - "recommendation": { - "target": "com.amazonaws.datazone#RecommendationConfiguration", - "traits": { - "smithy.api#documentation": "

Specifies whether the business name generation is to be enabled for this data\n source.

" - } - }, - "enableSetting": { - "target": "com.amazonaws.datazone#EnableSetting", + "description": { + "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "

Specifies whether the data source is enabled.

" + "smithy.api#documentation": "

The description of the data product.

" } }, - "schedule": { - "target": "com.amazonaws.datazone#ScheduleConfiguration", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "

The schedule of the data source runs.

" + "smithy.api#documentation": "

The glossary terms of the data product.

" } }, - "publishOnImport": { - "target": "smithy.api#Boolean", + "formsInput": { + "target": "com.amazonaws.datazone#FormInputList", "traits": { - "smithy.api#documentation": "

Specifies whether the assets that this data source creates in the inventory are to be\n also automatically published to the catalog.

" + "smithy.api#documentation": "

The metadata forms of the data product.

" } }, - "assetFormsInput": { - "target": "com.amazonaws.datazone#FormInputList", + "items": { + "target": "com.amazonaws.datazone#DataProductItems", "traits": { - "smithy.api#documentation": "

The metadata forms that are to be attached to the assets that this data source works\n with.

" + "smithy.api#documentation": "

The data assets of the data product.

" } }, "clientToken": { - "target": "smithy.api#String", + "target": "com.amazonaws.datazone#ClientToken", "traits": { "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", "smithy.api#idempotencyToken": {} @@ -2342,133 +2630,98 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateDataSourceOutput": { + "com.amazonaws.datazone#CreateDataProductOutput": { "type": "structure", "members": { - "id": { - "target": "com.amazonaws.datazone#DataSourceId", + "domainId": { + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source.

", + "smithy.api#documentation": "

The ID of the domain where the data product lives.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#DataSourceStatus", - "traits": { - "smithy.api#documentation": "

The status of the data source.

" - } - }, - "type": { - "target": "com.amazonaws.datazone#DataSourceType", - "traits": { - "smithy.api#documentation": "

The type of the data source.

" - } - }, - "name": { - "target": "com.amazonaws.datazone#Name", + "id": { + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "

The name of the data source.

", + "smithy.api#documentation": "

The ID of the data product.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "

The description of the data source.

" - } - }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the data source is created.

", + "smithy.api#documentation": "

The revision of the data product.

", "smithy.api#required": {} } }, - "projectId": { + "owningProjectId": { "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone project to which the data source is added.

", + "smithy.api#documentation": "

The ID of the owning project of the data product.

", "smithy.api#required": {} } }, - "environmentId": { - "target": "com.amazonaws.datazone#EnvironmentId", + "name": { + "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "

The unique identifier of the Amazon DataZone environment to which the data source publishes\n assets.

", + "smithy.api#documentation": "

The name of the data product.

", "smithy.api#required": {} } }, - "configuration": { - "target": "com.amazonaws.datazone#DataSourceConfigurationOutput", + "status": { + "target": "com.amazonaws.datazone#DataProductStatus", "traits": { - "smithy.api#documentation": "

Specifies the configuration of the data source. It can be set to either\n glueRunConfiguration or redshiftRunConfiguration.

", - "smithy.api#notProperty": {} + "smithy.api#default": "CREATED", + "smithy.api#documentation": "

The status of the data product.

", + "smithy.api#required": {} } }, - "recommendation": { - "target": "com.amazonaws.datazone#RecommendationConfiguration", + "description": { + "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "

Specifies whether the business name generation is to be enabled for this data\n source.

" + "smithy.api#documentation": "

The description of the data product.

" } }, - "enableSetting": { - "target": "com.amazonaws.datazone#EnableSetting", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "

Specifies whether the data source is enabled.

" + "smithy.api#documentation": "

The glossary terms of the data product.

" } }, - "publishOnImport": { - "target": "smithy.api#Boolean", + "items": { + "target": "com.amazonaws.datazone#DataProductItems", "traits": { - "smithy.api#documentation": "

Specifies whether the assets that this data source creates in the inventory are to be\n also automatically published to the catalog.

" + "smithy.api#documentation": "

The data assets of the data product.

" } }, - "assetFormsOutput": { + "formsOutput": { "target": "com.amazonaws.datazone#FormOutputList", "traits": { - "smithy.api#documentation": "

The metadata forms attached to the assets that this data source creates.

" - } - }, - "schedule": { - "target": "com.amazonaws.datazone#ScheduleConfiguration", - "traits": { - "smithy.api#documentation": "

The schedule of the data source runs.

" - } - }, - "lastRunStatus": { - "target": "com.amazonaws.datazone#DataSourceRunStatus", - "traits": { - "smithy.api#documentation": "

The status of the last run of this data source.

" - } - }, - "lastRunAt": { - "target": "com.amazonaws.datazone#DateTime", - "traits": { - "smithy.api#documentation": "

The timestamp that specifies when the data source was last run.

" + "smithy.api#documentation": "

The metadata forms of the data product.

" } }, - "lastRunErrorMessage": { - "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + "smithy.api#documentation": "

The timestamp at which the data product was created.

" } }, - "errorMessage": { - "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + "smithy.api#documentation": "

The user who created the data product.

" } }, - "createdAt": { - "target": "com.amazonaws.datazone#DateTime", + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "

The timestamp of when the data source was created.

" + "smithy.api#documentation": "

The timestamp at which the first revision of the data product was created.

" } }, - "updatedAt": { - "target": "com.amazonaws.datazone#DateTime", + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "

The timestamp of when the data source was updated.

" + "smithy.api#documentation": "

The user who created the first revision of the data product.

" } } }, @@ -2476,13 +2729,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateDomain": { + "com.amazonaws.datazone#CreateDataProductRevision": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateDomainInput" + "target": "com.amazonaws.datazone#CreateDataProductRevisionInput" }, "output": { - "target": "com.amazonaws.datazone#CreateDomainOutput" + "target": "com.amazonaws.datazone#CreateDataProductRevisionOutput" }, "errors": [ { @@ -2497,9 +2750,6 @@ { "target": "com.amazonaws.datazone#ResourceNotFoundException" }, - { - "target": "com.amazonaws.datazone#ServiceQuotaExceededException" - }, { "target": "com.amazonaws.datazone#ThrottlingException" }, @@ -2508,61 +2758,67 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Amazon DataZone domain.

", + "smithy.api#documentation": "

Creates a data product revision.

", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains" + "uri": "/v2/domains/{domainIdentifier}/data-products/{identifier}/revisions" }, - "smithy.api#idempotent": {}, - "smithy.api#tags": [ - "Administration" - ] + "smithy.api#idempotent": {} } }, - "com.amazonaws.datazone#CreateDomainInput": { + "com.amazonaws.datazone#CreateDataProductRevisionInput": { "type": "structure", "members": { - "name": { - "target": "smithy.api#String", + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The name of the Amazon DataZone domain.

", + "smithy.api#documentation": "

The ID of the domain where the data product revision is created.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "description": { - "target": "smithy.api#String", + "identifier": { + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "

The description of the Amazon DataZone domain.

" + "smithy.api#documentation": "

The ID of the data product revision.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "singleSignOn": { - "target": "com.amazonaws.datazone#SingleSignOn", + "name": { + "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "

The single-sign on configuration of the Amazon DataZone domain.

" + "smithy.api#documentation": "

The name of the data product revision.

", + "smithy.api#required": {} } }, - "domainExecutionRole": { - "target": "com.amazonaws.datazone#RoleArn", + "description": { + "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "

The domain execution role that is created when an Amazon DataZone domain is created. The\n domain execution role is created in the Amazon Web Services account that houses the\n Amazon DataZone domain.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The description of the data product revision.

" } }, - "kmsKeyIdentifier": { - "target": "com.amazonaws.datazone#KmsKeyArn", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon Web Services Key Management Service (KMS) key that is used\n to encrypt the Amazon DataZone domain, metadata, and reporting data.

" + "smithy.api#documentation": "

The glossary terms of the data product revision.

" } }, - "tags": { - "target": "com.amazonaws.datazone#Tags", + "items": { + "target": "com.amazonaws.datazone#DataProductItems", "traits": { - "smithy.api#documentation": "

The tags specified for the Amazon DataZone domain.

" + "smithy.api#documentation": "

The data assets of the data product revision.

" + } + }, + "formsInput": { + "target": "com.amazonaws.datazone#FormInputList", + "traits": { + "smithy.api#documentation": "

The metadata forms of the data product revision.

" } }, "clientToken": { - "target": "smithy.api#String", + "target": "com.amazonaws.datazone#ClientToken", "traits": { "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", "smithy.api#idempotencyToken": {} @@ -2573,119 +2829,112 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateDomainOutput": { + "com.amazonaws.datazone#CreateDataProductRevisionOutput": { "type": "structure", "members": { - "id": { + "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#documentation": "

The ID of the domain where data product revision is created.

", "smithy.api#required": {} } }, - "name": { - "target": "smithy.api#String", + "id": { + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "

The name of the Amazon DataZone domain.

" + "smithy.api#documentation": "

The ID of the data product revision.

", + "smithy.api#required": {} } }, - "description": { - "target": "smithy.api#String", + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "

The description of the Amazon DataZone domain.

" + "smithy.api#documentation": "

The revision of the data product revision.

", + "smithy.api#required": {} } }, - "singleSignOn": { - "target": "com.amazonaws.datazone#SingleSignOn", + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The single-sign on configuration of the Amazon DataZone domain.

" + "smithy.api#documentation": "

The ID of the owning project of the data product revision.

", + "smithy.api#required": {} } }, - "domainExecutionRole": { - "target": "com.amazonaws.datazone#RoleArn", + "name": { + "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "

The domain execution role that is created when an Amazon DataZone domain is created. The\n domain execution role is created in the Amazon Web Services account that houses the\n Amazon DataZone domain.

" + "smithy.api#documentation": "

The name of the data product revision.

", + "smithy.api#required": {} } }, - "arn": { - "target": "smithy.api#String", + "status": { + "target": "com.amazonaws.datazone#DataProductStatus", "traits": { - "smithy.api#documentation": "

The ARN of the Amazon DataZone domain.

" + "smithy.api#default": "CREATED", + "smithy.api#documentation": "

The status of the data product revision.

", + "smithy.api#required": {} } }, - "kmsKeyIdentifier": { - "target": "com.amazonaws.datazone#KmsKeyArn", + "description": { + "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon Web Services Key Management Service (KMS) key that is used\n to encrypt the Amazon DataZone domain, metadata, and reporting data.

" + "smithy.api#documentation": "

The description of the data product revision.

" } }, - "status": { - "target": "com.amazonaws.datazone#DomainStatus", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "

The status of the Amazon DataZone domain.

" + "smithy.api#documentation": "

The glossary terms of the data product revision.

" } }, - "portalUrl": { - "target": "smithy.api#String", + "items": { + "target": "com.amazonaws.datazone#DataProductItems", "traits": { - "smithy.api#documentation": "

The URL of the data portal for this Amazon DataZone domain.

" + "smithy.api#documentation": "

The data assets of the data product revision.

" } }, - "tags": { - "target": "com.amazonaws.datazone#Tags", + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", "traits": { - "smithy.api#documentation": "

The tags specified for the Amazon DataZone domain.

" + "smithy.api#documentation": "

The metadata forms of the data product revision.

" } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.datazone#CreateEnvironment": { - "type": "operation", - "input": { - "target": "com.amazonaws.datazone#CreateEnvironmentInput" - }, - "output": { - "target": "com.amazonaws.datazone#CreateEnvironmentOutput" - }, - "errors": [ - { - "target": "com.amazonaws.datazone#AccessDeniedException" - }, - { - "target": "com.amazonaws.datazone#ConflictException" }, - { - "target": "com.amazonaws.datazone#InternalServerException" + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp at which the data product revision is created.

" + } }, - { - "target": "com.amazonaws.datazone#ResourceNotFoundException" + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The user who created the data product revision.

" + } }, - { - "target": "com.amazonaws.datazone#ThrottlingException" + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp at which the first revision of the data product is created.

" + } }, - { - "target": "com.amazonaws.datazone#ValidationException" + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The user who created the first revision of the data product.

" + } } - ], + }, "traits": { - "smithy.api#documentation": "

Create an Amazon DataZone environment.

", - "smithy.api#http": { - "code": 201, - "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/environments" - } + "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateEnvironmentAction": { + "com.amazonaws.datazone#CreateDataSource": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateEnvironmentActionInput" + "target": "com.amazonaws.datazone#CreateDataSourceInput" }, "output": { - "target": "com.amazonaws.datazone#CreateEnvironmentActionOutput" + "target": "com.amazonaws.datazone#CreateDataSourceOutput" }, "errors": [ { @@ -2700,6 +2949,9 @@ { "target": "com.amazonaws.datazone#ResourceNotFoundException" }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.datazone#ThrottlingException" }, @@ -2708,316 +2960,236 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an action for the environment, for example, creates a console link for an\n analytics tool that is available in this environment.

", + "smithy.api#documentation": "

Creates an Amazon DataZone data source.

", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions" - } + "uri": "/v2/domains/{domainIdentifier}/data-sources" + }, + "smithy.api#idempotent": {} } }, - "com.amazonaws.datazone#CreateEnvironmentActionInput": { + "com.amazonaws.datazone#CreateDataSourceInput": { "type": "structure", "members": { + "name": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "

The name of the data source.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the data source.

" + } + }, "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the environment action is created.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain where the data source is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "environmentIdentifier": { - "target": "com.amazonaws.datazone#EnvironmentId", + "projectIdentifier": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The ID of the environment in which the environment action is created.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The identifier of the Amazon DataZone project in which you want to add this data\n source.

", "smithy.api#required": {} } }, - "name": { + "environmentIdentifier": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The name of the environment action.

", + "smithy.api#documentation": "

The unique identifier of the Amazon DataZone environment to which the data source publishes\n assets.

", "smithy.api#required": {} } }, - "parameters": { - "target": "com.amazonaws.datazone#ActionParameters", + "type": { + "target": "com.amazonaws.datazone#DataSourceType", "traits": { - "smithy.api#documentation": "

The parameters of the environment action.

", + "smithy.api#documentation": "

The type of the data source.

", "smithy.api#required": {} } }, - "description": { - "target": "smithy.api#String", + "configuration": { + "target": "com.amazonaws.datazone#DataSourceConfigurationInput", "traits": { - "smithy.api#documentation": "

The description of the environment action that is being created in the\n environment.

" + "smithy.api#documentation": "

Specifies the configuration of the data source. It can be set to either\n glueRunConfiguration or redshiftRunConfiguration.

", + "smithy.api#notProperty": {} } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.datazone#CreateEnvironmentActionOutput": { - "type": "structure", - "members": { - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + }, + "recommendation": { + "target": "com.amazonaws.datazone#RecommendationConfiguration", "traits": { - "smithy.api#documentation": "

The ID of the domain in which the environment action is created.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Specifies whether the business name generation is to be enabled for this data\n source.

" } }, - "environmentId": { - "target": "com.amazonaws.datazone#EnvironmentId", + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting", "traits": { - "smithy.api#documentation": "

The ID of the environment in which the environment is created.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Specifies whether the data source is enabled.

" } }, - "id": { - "target": "com.amazonaws.datazone#EnvironmentActionId", + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration", "traits": { - "smithy.api#documentation": "

The ID of the environment action.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The schedule of the data source runs.

" } }, - "name": { - "target": "smithy.api#String", + "publishOnImport": { + "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

The name of the environment action.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Specifies whether the assets that this data source creates in the inventory are to be\n also automatically published to the catalog.

" } }, - "parameters": { - "target": "com.amazonaws.datazone#ActionParameters", + "assetFormsInput": { + "target": "com.amazonaws.datazone#FormInputList", "traits": { - "smithy.api#documentation": "

The parameters of the environment action.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The metadata forms that are to be attached to the assets that this data source works\n with.

" } }, - "description": { + "clientToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The description of the environment action.

" + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} } } }, "traits": { - "smithy.api#output": {} + "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateEnvironmentInput": { + "com.amazonaws.datazone#CreateDataSourceOutput": { "type": "structure", "members": { - "projectIdentifier": { - "target": "com.amazonaws.datazone#ProjectId", + "id": { + "target": "com.amazonaws.datazone#DataSourceId", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon DataZone project in which this environment is created.

", + "smithy.api#documentation": "

The unique identifier of the data source.

", "smithy.api#required": {} } }, - "domainIdentifier": { - "target": "com.amazonaws.datazone#DomainId", + "status": { + "target": "com.amazonaws.datazone#DataSourceStatus", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the environment is created.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#documentation": "

The status of the data source.

" } }, - "description": { - "target": "smithy.api#String", + "type": { + "target": "com.amazonaws.datazone#DataSourceType", "traits": { - "smithy.api#documentation": "

The description of the Amazon DataZone environment.

" + "smithy.api#documentation": "

The type of the data source.

" } }, "name": { - "target": "smithy.api#String", + "target": "com.amazonaws.datazone#Name", "traits": { - "smithy.api#documentation": "

The name of the Amazon DataZone environment.

", + "smithy.api#documentation": "

The name of the data source.

", "smithy.api#required": {} } }, - "environmentProfileIdentifier": { - "target": "com.amazonaws.datazone#EnvironmentProfileId", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "

The identifier of the environment profile that is used to create this Amazon DataZone\n environment.

", + "smithy.api#documentation": "

The description of the data source.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the data source is created.

", "smithy.api#required": {} } }, - "userParameters": { - "target": "com.amazonaws.datazone#EnvironmentParametersList", + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment.

" + "smithy.api#documentation": "

The ID of the Amazon DataZone project to which the data source is added.

", + "smithy.api#required": {} } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "

The glossary terms that can be used in this Amazon DataZone environment.

" + "smithy.api#documentation": "

The unique identifier of the Amazon DataZone environment to which the data source publishes\n assets.

", + "smithy.api#required": {} } }, - "environmentAccountIdentifier": { - "target": "smithy.api#String", + "configuration": { + "target": "com.amazonaws.datazone#DataSourceConfigurationOutput", "traits": { - "smithy.api#documentation": "

The ID of the account in which the environment is being created.

" + "smithy.api#documentation": "

Specifies the configuration of the data source. It can be set to either\n glueRunConfiguration or redshiftRunConfiguration.

", + "smithy.api#notProperty": {} } }, - "environmentAccountRegion": { - "target": "smithy.api#String", + "recommendation": { + "target": "com.amazonaws.datazone#RecommendationConfiguration", "traits": { - "smithy.api#documentation": "

The region of the account in which the environment is being created.

" + "smithy.api#documentation": "

Specifies whether the business name generation is to be enabled for this data\n source.

" } }, - "environmentBlueprintIdentifier": { - "target": "smithy.api#String", + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting", "traits": { - "smithy.api#documentation": "

The ID of the blueprint with which the environment is being created.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.datazone#CreateEnvironmentOutput": { - "type": "structure", - "members": { - "projectId": { - "target": "com.amazonaws.datazone#ProjectId", - "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone project in which this environment is created.

", - "smithy.api#required": {} - } - }, - "id": { - "target": "com.amazonaws.datazone#EnvironmentId", - "traits": { - "smithy.api#documentation": "

The ID of this Amazon DataZone environment.

" - } - }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", - "traits": { - "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the environment is created.

", - "smithy.api#required": {} - } - }, - "createdBy": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who created this environment.

", - "smithy.api#required": {} - } - }, - "createdAt": { - "target": "smithy.api#Timestamp", - "traits": { - "smithy.api#documentation": "

The timestamp of when the environment was created.

", - "smithy.api#timestampFormat": "date-time" - } - }, - "updatedAt": { - "target": "smithy.api#Timestamp", - "traits": { - "smithy.api#documentation": "

The timestamp of when this environment was updated.

", - "smithy.api#timestampFormat": "date-time" - } - }, - "name": { - "target": "com.amazonaws.datazone#EnvironmentName", - "traits": { - "smithy.api#documentation": "

The name of this environment.

", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "

The description of this Amazon DataZone environment.

" - } - }, - "environmentProfileId": { - "target": "com.amazonaws.datazone#EnvironmentProfileId", - "traits": { - "smithy.api#addedDefault": {}, - "smithy.api#default": "", - "smithy.api#documentation": "

The ID of the environment profile with which this Amazon DataZone environment was\n created.

" - } - }, - "awsAccountId": { - "target": "com.amazonaws.datazone#AwsAccountId", - "traits": { - "smithy.api#documentation": "

The Amazon Web Services account in which the Amazon DataZone environment is created.

" - } - }, - "awsAccountRegion": { - "target": "com.amazonaws.datazone#AwsRegion", - "traits": { - "smithy.api#documentation": "

The Amazon Web Services region in which the Amazon DataZone environment is created.

" - } - }, - "provider": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "

The provider of this Amazon DataZone environment.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Specifies whether the data source is enabled.

" } }, - "provisionedResources": { - "target": "com.amazonaws.datazone#ResourceList", + "publishOnImport": { + "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

The provisioned resources of this Amazon DataZone environment.

" + "smithy.api#documentation": "

Specifies whether the assets that this data source creates in the inventory are to be\n also automatically published to the catalog.

" } }, - "status": { - "target": "com.amazonaws.datazone#EnvironmentStatus", + "assetFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", "traits": { - "smithy.api#documentation": "

The status of this Amazon DataZone environment.

" + "smithy.api#documentation": "

The metadata forms attached to the assets that this data source creates.

" } }, - "environmentActions": { - "target": "com.amazonaws.datazone#EnvironmentActionList", + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration", "traits": { - "smithy.api#documentation": "

The configurable actions of this Amazon DataZone environment.

" + "smithy.api#documentation": "

The schedule of the data source runs.

" } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "lastRunStatus": { + "target": "com.amazonaws.datazone#DataSourceRunStatus", "traits": { - "smithy.api#documentation": "

The glossary terms that can be used in this Amazon DataZone environment.

" + "smithy.api#documentation": "

The status of the last run of this data source.

" } }, - "userParameters": { - "target": "com.amazonaws.datazone#CustomParameterList", + "lastRunAt": { + "target": "com.amazonaws.datazone#DateTime", "traits": { - "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment.

" + "smithy.api#documentation": "

The timestamp that specifies when the data source was last run.

" } }, - "lastDeployment": { - "target": "com.amazonaws.datazone#Deployment", + "lastRunErrorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", "traits": { - "smithy.api#documentation": "

The details of the last deployment of this Amazon DataZone environment.

" + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" } }, - "provisioningProperties": { - "target": "com.amazonaws.datazone#ProvisioningProperties", + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", "traits": { - "smithy.api#documentation": "

The provisioning properties of this Amazon DataZone environment.

" + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" } }, - "deploymentProperties": { - "target": "com.amazonaws.datazone#DeploymentProperties", + "createdAt": { + "target": "com.amazonaws.datazone#DateTime", "traits": { - "smithy.api#documentation": "

The deployment properties of this Amazon DataZone environment.

" + "smithy.api#documentation": "

The timestamp of when the data source was created.

" } }, - "environmentBlueprintId": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime", "traits": { - "smithy.api#documentation": "

The ID of the blueprint with which this Amazon DataZone environment was created.

" + "smithy.api#documentation": "

The timestamp of when the data source was updated.

" } } }, @@ -3025,13 +3197,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateEnvironmentProfile": { + "com.amazonaws.datazone#CreateDomain": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateEnvironmentProfileInput" + "target": "com.amazonaws.datazone#CreateDomainInput" }, "output": { - "target": "com.amazonaws.datazone#CreateEnvironmentProfileOutput" + "target": "com.amazonaws.datazone#CreateDomainOutput" }, "errors": [ { @@ -3057,68 +3229,64 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Amazon DataZone environment profile.

", + "smithy.api#documentation": "

Creates an Amazon DataZone domain.

", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/environment-profiles" - } + "uri": "/v2/domains" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] } }, - "com.amazonaws.datazone#CreateEnvironmentProfileInput": { + "com.amazonaws.datazone#CreateDomainInput": { "type": "structure", "members": { - "domainIdentifier": { - "target": "com.amazonaws.datazone#DomainId", - "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this environment profile is created.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, "name": { - "target": "com.amazonaws.datazone#EnvironmentProfileName", + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The name of this Amazon DataZone environment profile.

", + "smithy.api#documentation": "

The name of the Amazon DataZone domain.

", "smithy.api#required": {} } }, "description": { - "target": "com.amazonaws.datazone#Description", + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The description of this Amazon DataZone environment profile.

" + "smithy.api#documentation": "

The description of the Amazon DataZone domain.

" } }, - "environmentBlueprintIdentifier": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "singleSignOn": { + "target": "com.amazonaws.datazone#SingleSignOn", "traits": { - "smithy.api#documentation": "

The ID of the blueprint with which this environment profile is created.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The single-sign on configuration of the Amazon DataZone domain.

" } }, - "projectIdentifier": { - "target": "com.amazonaws.datazone#ProjectId", + "domainExecutionRole": { + "target": "com.amazonaws.datazone#RoleArn", "traits": { - "smithy.api#documentation": "

The identifier of the project in which to create the environment profile.

", + "smithy.api#documentation": "

The domain execution role that is created when an Amazon DataZone domain is created. The\n domain execution role is created in the Amazon Web Services account that houses the\n Amazon DataZone domain.

", "smithy.api#required": {} } }, - "userParameters": { - "target": "com.amazonaws.datazone#EnvironmentParametersList", + "kmsKeyIdentifier": { + "target": "com.amazonaws.datazone#KmsKeyArn", "traits": { - "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment profile.

" + "smithy.api#documentation": "

The identifier of the Amazon Web Services Key Management Service (KMS) key that is used\n to encrypt the Amazon DataZone domain, metadata, and reporting data.

" } }, - "awsAccountId": { - "target": "com.amazonaws.datazone#AwsAccountId", + "tags": { + "target": "com.amazonaws.datazone#Tags", "traits": { - "smithy.api#documentation": "

The Amazon Web Services account in which the Amazon DataZone environment is created.

" + "smithy.api#documentation": "

The tags specified for the Amazon DataZone domain.

" } }, - "awsAccountRegion": { - "target": "com.amazonaws.datazone#AwsRegion", + "clientToken": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Services region in which this environment profile is created.

" + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} } } }, @@ -3126,86 +3294,68 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateEnvironmentProfileOutput": { + "com.amazonaws.datazone#CreateDomainOutput": { "type": "structure", "members": { "id": { - "target": "com.amazonaws.datazone#EnvironmentProfileId", + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of this Amazon DataZone environment profile.

", + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", "smithy.api#required": {} } }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "name": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this environment profile is created.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The name of the Amazon DataZone domain.

" } }, - "awsAccountId": { - "target": "com.amazonaws.datazone#AwsAccountId", + "description": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Services account ID in which this Amazon DataZone environment profile is\n created.

" + "smithy.api#documentation": "

The description of the Amazon DataZone domain.

" } }, - "awsAccountRegion": { - "target": "com.amazonaws.datazone#AwsRegion", + "singleSignOn": { + "target": "com.amazonaws.datazone#SingleSignOn", "traits": { - "smithy.api#documentation": "

The Amazon Web Services region in which this Amazon DataZone environment profile is\n created.

" + "smithy.api#documentation": "

The single-sign on configuration of the Amazon DataZone domain.

" } }, - "createdBy": { - "target": "smithy.api#String", + "domainExecutionRole": { + "target": "com.amazonaws.datazone#RoleArn", "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who created this environment profile.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The domain execution role that is created when an Amazon DataZone domain is created. The\n domain execution role is created in the Amazon Web Services account that houses the\n Amazon DataZone domain.

" } }, - "createdAt": { - "target": "smithy.api#Timestamp", + "arn": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The timestamp of when this environment profile was created.

", - "smithy.api#timestampFormat": "date-time" + "smithy.api#documentation": "

The ARN of the Amazon DataZone domain.

" } }, - "updatedAt": { - "target": "smithy.api#Timestamp", + "kmsKeyIdentifier": { + "target": "com.amazonaws.datazone#KmsKeyArn", "traits": { - "smithy.api#documentation": "

The timestamp of when this environment profile was updated.

", - "smithy.api#timestampFormat": "date-time" + "smithy.api#documentation": "

The identifier of the Amazon Web Services Key Management Service (KMS) key that is used\n to encrypt the Amazon DataZone domain, metadata, and reporting data.

" } }, - "name": { - "target": "com.amazonaws.datazone#EnvironmentProfileName", - "traits": { - "smithy.api#documentation": "

The name of this Amazon DataZone environment profile.

", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "

The description of this Amazon DataZone environment profile.

" - } - }, - "environmentBlueprintId": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "status": { + "target": "com.amazonaws.datazone#DomainStatus", "traits": { - "smithy.api#documentation": "

The ID of the blueprint with which this environment profile is created.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The status of the Amazon DataZone domain.

" } }, - "projectId": { - "target": "com.amazonaws.datazone#ProjectId", + "portalUrl": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone project in which this environment profile is created.

" + "smithy.api#documentation": "

The URL of the data portal for this Amazon DataZone domain.

" } }, - "userParameters": { - "target": "com.amazonaws.datazone#CustomParameterList", + "tags": { + "target": "com.amazonaws.datazone#Tags", "traits": { - "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment profile.

" + "smithy.api#documentation": "

The tags specified for the Amazon DataZone domain.

" } } }, @@ -3213,13 +3363,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateFormType": { + "com.amazonaws.datazone#CreateEnvironment": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateFormTypeInput" + "target": "com.amazonaws.datazone#CreateEnvironmentInput" }, "output": { - "target": "com.amazonaws.datazone#CreateFormTypeOutput" + "target": "com.amazonaws.datazone#CreateEnvironmentOutput" }, "errors": [ { @@ -3232,7 +3382,7 @@ "target": "com.amazonaws.datazone#InternalServerException" }, { - "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + "target": "com.amazonaws.datazone#ResourceNotFoundException" }, { "target": "com.amazonaws.datazone#ThrottlingException" @@ -3242,56 +3392,88 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a metadata form type.

", + "smithy.api#documentation": "

Create an Amazon DataZone environment.

", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/form-types" + "uri": "/v2/domains/{domainIdentifier}/environments" } } }, - "com.amazonaws.datazone#CreateFormTypeInput": { + "com.amazonaws.datazone#CreateEnvironmentAction": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateEnvironmentActionInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateEnvironmentActionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an action for the environment, for example, creates a console link for an\n analytics tool that is available in this environment.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions" + } + } + }, + "com.amazonaws.datazone#CreateEnvironmentActionInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this metadata form type is created.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the environment action is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#FormTypeName", + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "

The name of this Amazon DataZone metadata form type.

", + "smithy.api#documentation": "

The ID of the environment in which the environment action is created.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "model": { - "target": "com.amazonaws.datazone#Model", + "name": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The model of this Amazon DataZone metadata form type.

", + "smithy.api#documentation": "

The name of the environment action.

", "smithy.api#required": {} } }, - "owningProjectIdentifier": { - "target": "com.amazonaws.datazone#ProjectId", + "parameters": { + "target": "com.amazonaws.datazone#ActionParameters", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone project that owns this metadata form type.

", + "smithy.api#documentation": "

The parameters of the environment action.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#FormTypeStatus", - "traits": { - "smithy.api#documentation": "

The status of this Amazon DataZone metadata form type.

" - } - }, "description": { - "target": "com.amazonaws.datazone#Description", + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The description of this Amazon DataZone metadata form type.

" + "smithy.api#documentation": "

The description of the environment action that is being created in the\n environment.

" } } }, @@ -3299,52 +3481,48 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateFormTypeOutput": { + "com.amazonaws.datazone#CreateEnvironmentActionOutput": { "type": "structure", "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this metadata form type is created.

", + "smithy.api#documentation": "

The ID of the domain in which the environment action is created.

", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#FormTypeName", + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "

The name of this Amazon DataZone metadata form type.

", + "smithy.api#documentation": "

The ID of the environment in which the environment is created.

", "smithy.api#required": {} } }, - "revision": { - "target": "com.amazonaws.datazone#Revision", + "id": { + "target": "com.amazonaws.datazone#EnvironmentActionId", "traits": { - "smithy.api#documentation": "

The revision of this Amazon DataZone metadata form type.

", + "smithy.api#documentation": "

The ID of the environment action.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "

The description of this Amazon DataZone metadata form type.

" - } - }, - "owningProjectId": { - "target": "com.amazonaws.datazone#ProjectId", + "name": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The ID of the project that owns this Amazon DataZone metadata form type.

" + "smithy.api#documentation": "

The name of the environment action.

", + "smithy.api#required": {} } }, - "originDomainId": { - "target": "com.amazonaws.datazone#DomainId", + "parameters": { + "target": "com.amazonaws.datazone#ActionParameters", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this metadata form type was originally\n created.

" + "smithy.api#documentation": "

The parameters of the environment action.

", + "smithy.api#required": {} } }, - "originProjectId": { - "target": "com.amazonaws.datazone#ProjectId", + "description": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The ID of the project in which this Amazon DataZone metadata form type was originally\n created.

" + "smithy.api#documentation": "

The description of the environment action.

" } } }, @@ -3352,86 +3530,72 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateGlossary": { - "type": "operation", - "input": { - "target": "com.amazonaws.datazone#CreateGlossaryInput" - }, - "output": { - "target": "com.amazonaws.datazone#CreateGlossaryOutput" - }, - "errors": [ - { - "target": "com.amazonaws.datazone#AccessDeniedException" - }, - { - "target": "com.amazonaws.datazone#ConflictException" - }, - { - "target": "com.amazonaws.datazone#InternalServerException" - }, - { - "target": "com.amazonaws.datazone#ServiceQuotaExceededException" - }, - { - "target": "com.amazonaws.datazone#ThrottlingException" - }, - { - "target": "com.amazonaws.datazone#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

Creates an Amazon DataZone business glossary.

", - "smithy.api#http": { - "code": 201, - "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/glossaries" - }, - "smithy.api#idempotent": {} - } - }, - "com.amazonaws.datazone#CreateGlossaryInput": { + "com.amazonaws.datazone#CreateEnvironmentInput": { "type": "structure", "members": { + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone project in which this environment is created.

", + "smithy.api#required": {} + } + }, "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary is created.

", + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the environment is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the Amazon DataZone environment.

" + } + }, "name": { - "target": "com.amazonaws.datazone#GlossaryName", + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The name of this business glossary.

", + "smithy.api#documentation": "

The name of the Amazon DataZone environment.

", "smithy.api#required": {} } }, - "owningProjectIdentifier": { - "target": "com.amazonaws.datazone#ProjectId", + "environmentProfileIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", "traits": { - "smithy.api#documentation": "

The ID of the project that currently owns business glossary.

", + "smithy.api#documentation": "

The identifier of the environment profile that is used to create this Amazon DataZone\n environment.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#GlossaryDescription", + "userParameters": { + "target": "com.amazonaws.datazone#EnvironmentParametersList", "traits": { - "smithy.api#documentation": "

The description of this business glossary.

" + "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment.

" } }, - "status": { - "target": "com.amazonaws.datazone#GlossaryStatus", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "

The status of this business glossary.

" + "smithy.api#documentation": "

The glossary terms that can be used in this Amazon DataZone environment.

" } }, - "clientToken": { - "target": "com.amazonaws.datazone#ClientToken", + "environmentAccountIdentifier": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", - "smithy.api#idempotencyToken": {} + "smithy.api#documentation": "

The ID of the account in which the environment is being created.

" + } + }, + "environmentAccountRegion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The region of the account in which the environment is being created.

" + } + }, + "environmentBlueprintIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the blueprint with which the environment is being created.

" } } }, @@ -3439,47 +3603,142 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateGlossaryOutput": { + "com.amazonaws.datazone#CreateEnvironmentOutput": { "type": "structure", "members": { - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary is created.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone project in which this environment is created.

", "smithy.api#required": {} } }, "id": { - "target": "com.amazonaws.datazone#GlossaryId", + "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "

The ID of this business glossary.

", + "smithy.api#documentation": "

The ID of this Amazon DataZone environment.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the environment is created.

", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#GlossaryName", + "createdBy": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The name of this business glossary.

", + "smithy.api#documentation": "

The Amazon DataZone user who created this environment.

", "smithy.api#required": {} } }, - "owningProjectId": { - "target": "com.amazonaws.datazone#ProjectId", + "createdAt": { + "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The ID of the project that currently owns this business glossary.

", + "smithy.api#documentation": "

The timestamp of when the environment was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this environment was updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentName", + "traits": { + "smithy.api#documentation": "

The name of this environment.

", "smithy.api#required": {} } }, "description": { - "target": "com.amazonaws.datazone#GlossaryDescription", + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "

The description of this business glossary.

" + "smithy.api#documentation": "

The description of this Amazon DataZone environment.

" + } + }, + "environmentProfileId": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "

The ID of the environment profile with which this Amazon DataZone environment was\n created.

" + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account in which the Amazon DataZone environment is created.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services region in which the Amazon DataZone environment is created.

" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of this Amazon DataZone environment.

", + "smithy.api#required": {} + } + }, + "provisionedResources": { + "target": "com.amazonaws.datazone#ResourceList", + "traits": { + "smithy.api#documentation": "

The provisioned resources of this Amazon DataZone environment.

" } }, "status": { - "target": "com.amazonaws.datazone#GlossaryStatus", + "target": "com.amazonaws.datazone#EnvironmentStatus", "traits": { - "smithy.api#documentation": "

The status of this business glossary.

" + "smithy.api#documentation": "

The status of this Amazon DataZone environment.

" + } + }, + "environmentActions": { + "target": "com.amazonaws.datazone#EnvironmentActionList", + "traits": { + "smithy.api#documentation": "

The configurable actions of this Amazon DataZone environment.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms that can be used in this Amazon DataZone environment.

" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", + "traits": { + "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment.

" + } + }, + "lastDeployment": { + "target": "com.amazonaws.datazone#Deployment", + "traits": { + "smithy.api#documentation": "

The details of the last deployment of this Amazon DataZone environment.

" + } + }, + "provisioningProperties": { + "target": "com.amazonaws.datazone#ProvisioningProperties", + "traits": { + "smithy.api#documentation": "

The provisioning properties of this Amazon DataZone environment.

" + } + }, + "deploymentProperties": { + "target": "com.amazonaws.datazone#DeploymentProperties", + "traits": { + "smithy.api#documentation": "

The deployment properties of this Amazon DataZone environment.

" + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of the blueprint with which this Amazon DataZone environment was created.

" } } }, @@ -3487,13 +3746,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateGlossaryTerm": { + "com.amazonaws.datazone#CreateEnvironmentProfile": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateGlossaryTermInput" + "target": "com.amazonaws.datazone#CreateEnvironmentProfileInput" }, "output": { - "target": "com.amazonaws.datazone#CreateGlossaryTermOutput" + "target": "com.amazonaws.datazone#CreateEnvironmentProfileOutput" }, "errors": [ { @@ -3519,69 +3778,68 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a business glossary term.

", + "smithy.api#documentation": "

Creates an Amazon DataZone environment profile.

", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/glossary-terms" - }, - "smithy.api#idempotent": {} + "uri": "/v2/domains/{domainIdentifier}/environment-profiles" + } } }, - "com.amazonaws.datazone#CreateGlossaryTermInput": { + "com.amazonaws.datazone#CreateEnvironmentProfileInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary term is created.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this environment profile is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "glossaryIdentifier": { - "target": "com.amazonaws.datazone#GlossaryTermId", + "name": { + "target": "com.amazonaws.datazone#EnvironmentProfileName", "traits": { - "smithy.api#documentation": "

The ID of the business glossary in which this term is created.

", + "smithy.api#documentation": "

The name of this Amazon DataZone environment profile.

", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#GlossaryTermName", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "

The name of this business glossary term.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The description of this Amazon DataZone environment profile.

" } }, - "status": { - "target": "com.amazonaws.datazone#GlossaryTermStatus", + "environmentBlueprintIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", "traits": { - "smithy.api#documentation": "

The status of this business glossary term.

" + "smithy.api#documentation": "

The ID of the blueprint with which this environment profile is created.

", + "smithy.api#required": {} } }, - "shortDescription": { - "target": "com.amazonaws.datazone#ShortDescription", + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The short description of this business glossary term.

" + "smithy.api#documentation": "

The identifier of the project in which to create the environment profile.

", + "smithy.api#required": {} } }, - "longDescription": { - "target": "com.amazonaws.datazone#LongDescription", + "userParameters": { + "target": "com.amazonaws.datazone#EnvironmentParametersList", "traits": { - "smithy.api#documentation": "

The long description of this business glossary term.

" + "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment profile.

" } }, - "termRelations": { - "target": "com.amazonaws.datazone#TermRelations", + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", "traits": { - "smithy.api#documentation": "

The term relations of this business glossary term.

" + "smithy.api#documentation": "

The Amazon Web Services account in which the Amazon DataZone environment is created.

" } }, - "clientToken": { - "target": "com.amazonaws.datazone#ClientToken", + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", - "smithy.api#idempotencyToken": {} + "smithy.api#documentation": "

The Amazon Web Services region in which this environment profile is created.

" } } }, @@ -3589,60 +3847,86 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateGlossaryTermOutput": { + "com.amazonaws.datazone#CreateEnvironmentProfileOutput": { "type": "structure", "members": { "id": { - "target": "com.amazonaws.datazone#GlossaryTermId", + "target": "com.amazonaws.datazone#EnvironmentProfileId", "traits": { - "smithy.api#documentation": "

The ID of this business glossary term.

", + "smithy.api#documentation": "

The ID of this Amazon DataZone environment profile.

", "smithy.api#required": {} } }, "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary term is created.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this environment profile is created.

", "smithy.api#required": {} } }, - "glossaryId": { - "target": "com.amazonaws.datazone#GlossaryId", + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", "traits": { - "smithy.api#documentation": "

The ID of the business glossary in which this term is created.

", + "smithy.api#documentation": "

The Amazon Web Services account ID in which this Amazon DataZone environment profile is\n created.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services region in which this Amazon DataZone environment profile is\n created.

" + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created this environment profile.

", "smithy.api#required": {} } }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this environment profile was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this environment profile was updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, "name": { - "target": "com.amazonaws.datazone#GlossaryTermName", + "target": "com.amazonaws.datazone#EnvironmentProfileName", "traits": { - "smithy.api#documentation": "

The name of this business glossary term.

", + "smithy.api#documentation": "

The name of this Amazon DataZone environment profile.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#GlossaryTermStatus", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "

The status of this business glossary term.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The description of this Amazon DataZone environment profile.

" } }, - "shortDescription": { - "target": "com.amazonaws.datazone#ShortDescription", + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", "traits": { - "smithy.api#documentation": "

The short description of this business glossary term.

" + "smithy.api#documentation": "

The ID of the blueprint with which this environment profile is created.

", + "smithy.api#required": {} } }, - "longDescription": { - "target": "com.amazonaws.datazone#LongDescription", + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The long description of this business glossary term.

" + "smithy.api#documentation": "

The ID of the Amazon DataZone project in which this environment profile is created.

" } }, - "termRelations": { - "target": "com.amazonaws.datazone#TermRelations", + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", "traits": { - "smithy.api#documentation": "

The term relations of this business glossary term.

" + "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment profile.

" } } }, @@ -3650,64 +3934,85 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateGroupProfile": { + "com.amazonaws.datazone#CreateFormType": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateGroupProfileInput" + "target": "com.amazonaws.datazone#CreateFormTypeInput" }, "output": { - "target": "com.amazonaws.datazone#CreateGroupProfileOutput" + "target": "com.amazonaws.datazone#CreateFormTypeOutput" }, "errors": [ { "target": "com.amazonaws.datazone#AccessDeniedException" }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, { "target": "com.amazonaws.datazone#InternalServerException" }, { - "target": "com.amazonaws.datazone#ResourceNotFoundException" + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" }, { "target": "com.amazonaws.datazone#ValidationException" } ], "traits": { - "smithy.api#documentation": "

Creates a group profile in Amazon DataZone.

", + "smithy.api#documentation": "

Creates a metadata form type.

", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/group-profiles" - }, - "smithy.api#idempotent": {}, - "smithy.api#tags": [ - "Administration" - ] + "uri": "/v2/domains/{domainIdentifier}/form-types" + } } }, - "com.amazonaws.datazone#CreateGroupProfileInput": { + "com.amazonaws.datazone#CreateFormTypeInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the group profile is created.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this metadata form type is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "groupIdentifier": { - "target": "com.amazonaws.datazone#GroupIdentifier", + "name": { + "target": "com.amazonaws.datazone#FormTypeName", "traits": { - "smithy.api#documentation": "

The identifier of the group for which the group profile is created.

", + "smithy.api#documentation": "

The name of this Amazon DataZone metadata form type.

", "smithy.api#required": {} } }, - "clientToken": { - "target": "smithy.api#String", + "model": { + "target": "com.amazonaws.datazone#Model", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", - "smithy.api#idempotencyToken": {} + "smithy.api#documentation": "

The model of this Amazon DataZone metadata form type.

", + "smithy.api#required": {} + } + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project that owns this metadata form type.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#FormTypeStatus", + "traits": { + "smithy.api#documentation": "

The status of this Amazon DataZone metadata form type.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of this Amazon DataZone metadata form type.

" } } }, @@ -3715,31 +4020,52 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateGroupProfileOutput": { + "com.amazonaws.datazone#CreateFormTypeOutput": { "type": "structure", "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the group profile is created.

" + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this metadata form type is created.

", + "smithy.api#required": {} } }, - "id": { - "target": "com.amazonaws.datazone#GroupProfileId", + "name": { + "target": "com.amazonaws.datazone#FormTypeName", "traits": { - "smithy.api#documentation": "

The identifier of the group profile.

" + "smithy.api#documentation": "

The name of this Amazon DataZone metadata form type.

", + "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#GroupProfileStatus", + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "

The status of the group profile.

" + "smithy.api#documentation": "

The revision of this Amazon DataZone metadata form type.

", + "smithy.api#required": {} } }, - "groupName": { - "target": "com.amazonaws.datazone#GroupProfileName", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "

The name of the group for which group profile is created.

" + "smithy.api#documentation": "

The description of this Amazon DataZone metadata form type.

" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project that owns this Amazon DataZone metadata form type.

" + } + }, + "originDomainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this metadata form type was originally\n created.

" + } + }, + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project in which this Amazon DataZone metadata form type was originally\n created.

" } } }, @@ -3747,13 +4073,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateListingChangeSet": { + "com.amazonaws.datazone#CreateGlossary": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateListingChangeSetInput" + "target": "com.amazonaws.datazone#CreateGlossaryInput" }, "output": { - "target": "com.amazonaws.datazone#CreateListingChangeSetOutput" + "target": "com.amazonaws.datazone#CreateGlossaryOutput" }, "errors": [ { @@ -3765,9 +4091,6 @@ { "target": "com.amazonaws.datazone#InternalServerException" }, - { - "target": "com.amazonaws.datazone#ResourceNotFoundException" - }, { "target": "com.amazonaws.datazone#ServiceQuotaExceededException" }, @@ -3779,50 +4102,50 @@ } ], "traits": { - "smithy.api#documentation": "

Publishes a listing (a record of an asset at a given time) or removes a listing from the\n catalog.

", + "smithy.api#documentation": "

Creates an Amazon DataZone business glossary.

", "smithy.api#http": { - "code": 200, + "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/listings/change-set" - } + "uri": "/v2/domains/{domainIdentifier}/glossaries" + }, + "smithy.api#idempotent": {} } }, - "com.amazonaws.datazone#CreateListingChangeSetInput": { + "com.amazonaws.datazone#CreateGlossaryInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "entityIdentifier": { - "target": "com.amazonaws.datazone#EntityIdentifier", + "name": { + "target": "com.amazonaws.datazone#GlossaryName", "traits": { - "smithy.api#documentation": "

The ID of the asset.

", + "smithy.api#documentation": "

The name of this business glossary.

", "smithy.api#required": {} } }, - "entityType": { - "target": "com.amazonaws.datazone#EntityType", + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The type of an entity.

", + "smithy.api#documentation": "

The ID of the project that currently owns business glossary.

", "smithy.api#required": {} } }, - "entityRevision": { - "target": "com.amazonaws.datazone#Revision", + "description": { + "target": "com.amazonaws.datazone#GlossaryDescription", "traits": { - "smithy.api#documentation": "

The revision of an asset.

" + "smithy.api#documentation": "

The description of this business glossary.

" } }, - "action": { - "target": "com.amazonaws.datazone#ChangeAction", + "status": { + "target": "com.amazonaws.datazone#GlossaryStatus", "traits": { - "smithy.api#documentation": "

Specifies whether to publish or unpublish a listing.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The status of this business glossary.

" } }, "clientToken": { @@ -3837,42 +4160,61 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateListingChangeSetOutput": { + "com.amazonaws.datazone#CreateGlossaryOutput": { "type": "structure", "members": { - "listingId": { - "target": "com.amazonaws.datazone#ListingId", + "domainId": { + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the listing (a record of an asset at a given time).

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary is created.

", "smithy.api#required": {} } }, - "listingRevision": { - "target": "com.amazonaws.datazone#Revision", + "id": { + "target": "com.amazonaws.datazone#GlossaryId", "traits": { - "smithy.api#documentation": "

The revision of a listing.

", + "smithy.api#documentation": "

The ID of this business glossary.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#ListingStatus", + "name": { + "target": "com.amazonaws.datazone#GlossaryName", "traits": { - "smithy.api#documentation": "

Specifies the status of the listing.

", + "smithy.api#documentation": "

The name of this business glossary.

", + "smithy.api#required": {} + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project that currently owns this business glossary.

", "smithy.api#required": {} } + }, + "description": { + "target": "com.amazonaws.datazone#GlossaryDescription", + "traits": { + "smithy.api#documentation": "

The description of this business glossary.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryStatus", + "traits": { + "smithy.api#documentation": "

The status of this business glossary.

" + } } }, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateProject": { + "com.amazonaws.datazone#CreateGlossaryTerm": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateProjectInput" + "target": "com.amazonaws.datazone#CreateGlossaryTermInput" }, "output": { - "target": "com.amazonaws.datazone#CreateProjectOutput" + "target": "com.amazonaws.datazone#CreateGlossaryTermOutput" }, "errors": [ { @@ -3898,42 +4240,69 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Amazon DataZone project.

", + "smithy.api#documentation": "

Creates a business glossary term.

", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/projects" - } + "uri": "/v2/domains/{domainIdentifier}/glossary-terms" + }, + "smithy.api#idempotent": {} } }, - "com.amazonaws.datazone#CreateProjectInput": { + "com.amazonaws.datazone#CreateGlossaryTermInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this project is created.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary term is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, + "glossaryIdentifier": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "

The ID of the business glossary in which this term is created.

", + "smithy.api#required": {} + } + }, "name": { - "target": "com.amazonaws.datazone#ProjectName", + "target": "com.amazonaws.datazone#GlossaryTermName", "traits": { - "smithy.api#documentation": "

The name of the Amazon DataZone project.

", + "smithy.api#documentation": "

The name of this business glossary term.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#Description", + "status": { + "target": "com.amazonaws.datazone#GlossaryTermStatus", "traits": { - "smithy.api#documentation": "

The description of the Amazon DataZone project.

" + "smithy.api#documentation": "

The status of this business glossary term.

" } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription", "traits": { - "smithy.api#documentation": "

The glossary terms that can be used in this Amazon DataZone project.

" + "smithy.api#documentation": "

The short description of this business glossary term.

" + } + }, + "longDescription": { + "target": "com.amazonaws.datazone#LongDescription", + "traits": { + "smithy.api#documentation": "

The long description of this business glossary term.

" + } + }, + "termRelations": { + "target": "com.amazonaws.datazone#TermRelations", + "traits": { + "smithy.api#documentation": "

The term relations of this business glossary term.

" + } + }, + "clientToken": { + "target": "com.amazonaws.datazone#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} } } }, @@ -3941,15 +4310,76 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateProjectMembership": { - "type": "operation", - "input": { - "target": "com.amazonaws.datazone#CreateProjectMembershipInput" - }, - "output": { - "target": "com.amazonaws.datazone#CreateProjectMembershipOutput" - }, - "errors": [ + "com.amazonaws.datazone#CreateGlossaryTermOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "

The ID of this business glossary term.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary term is created.

", + "smithy.api#required": {} + } + }, + "glossaryId": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The ID of the business glossary in which this term is created.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryTermName", + "traits": { + "smithy.api#documentation": "

The name of this business glossary term.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryTermStatus", + "traits": { + "smithy.api#documentation": "

The status of this business glossary term.

", + "smithy.api#required": {} + } + }, + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription", + "traits": { + "smithy.api#documentation": "

The short description of this business glossary term.

" + } + }, + "longDescription": { + "target": "com.amazonaws.datazone#LongDescription", + "traits": { + "smithy.api#documentation": "

The long description of this business glossary term.

" + } + }, + "termRelations": { + "target": "com.amazonaws.datazone#TermRelations", + "traits": { + "smithy.api#documentation": "

The term relations of this business glossary term.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateGroupProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateGroupProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateGroupProfileOutput" + }, + "errors": [ { "target": "com.amazonaws.datazone#AccessDeniedException" }, @@ -3964,45 +4394,41 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a project membership in Amazon DataZone.

", + "smithy.api#documentation": "

Creates a group profile in Amazon DataZone.

", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/createMembership" - } + "uri": "/v2/domains/{domainIdentifier}/group-profiles" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] } }, - "com.amazonaws.datazone#CreateProjectMembershipInput": { + "com.amazonaws.datazone#CreateGroupProfileInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which project membership is created.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "projectIdentifier": { - "target": "com.amazonaws.datazone#ProjectId", - "traits": { - "smithy.api#documentation": "

The ID of the project for which this project membership was created.

", + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the group profile is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "member": { - "target": "com.amazonaws.datazone#Member", + "groupIdentifier": { + "target": "com.amazonaws.datazone#GroupIdentifier", "traits": { - "smithy.api#documentation": "

The project member whose project membership was created.

", + "smithy.api#documentation": "

The identifier of the group for which the group profile is created.

", "smithy.api#required": {} } }, - "designation": { - "target": "com.amazonaws.datazone#UserDesignation", + "clientToken": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The designation of the project membership.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} } } }, @@ -4010,80 +4436,31 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateProjectMembershipOutput": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.datazone#CreateProjectOutput": { + "com.amazonaws.datazone#CreateGroupProfileOutput": { "type": "structure", "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the project was created.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the group profile is created.

" } }, "id": { - "target": "com.amazonaws.datazone#ProjectId", - "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone project.

", - "smithy.api#required": {} - } - }, - "name": { - "target": "com.amazonaws.datazone#ProjectName", - "traits": { - "smithy.api#documentation": "

The name of the project.

", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "

The description of the project.

" - } - }, - "projectStatus": { - "target": "com.amazonaws.datazone#ProjectStatus", - "traits": { - "smithy.api#documentation": "

The status of the Amazon DataZone project that was created.

" - } - }, - "failureReasons": { - "target": "com.amazonaws.datazone#FailureReasons", - "traits": { - "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" - } - }, - "createdBy": { - "target": "com.amazonaws.datazone#CreatedBy", - "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who created the project.

", - "smithy.api#required": {} - } - }, - "createdAt": { - "target": "smithy.api#Timestamp", + "target": "com.amazonaws.datazone#GroupProfileId", "traits": { - "smithy.api#documentation": "

The timestamp of when the project was created.

", - "smithy.api#timestampFormat": "date-time" + "smithy.api#documentation": "

The identifier of the group profile.

" } }, - "lastUpdatedAt": { - "target": "smithy.api#Timestamp", + "status": { + "target": "com.amazonaws.datazone#GroupProfileStatus", "traits": { - "smithy.api#documentation": "

The timestamp of when the project was last updated.

", - "smithy.api#timestampFormat": "date-time" + "smithy.api#documentation": "

The status of the group profile.

" } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "groupName": { + "target": "com.amazonaws.datazone#GroupProfileName", "traits": { - "smithy.api#documentation": "

The glossary terms that can be used in the project.

" + "smithy.api#documentation": "

The name of the group for which group profile is created.

" } } }, @@ -4091,13 +4468,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateSubscriptionGrant": { + "com.amazonaws.datazone#CreateListingChangeSet": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateSubscriptionGrantInput" + "target": "com.amazonaws.datazone#CreateListingChangeSetInput" }, "output": { - "target": "com.amazonaws.datazone#CreateSubscriptionGrantOutput" + "target": "com.amazonaws.datazone#CreateListingChangeSetOutput" }, "errors": [ { @@ -4112,6 +4489,9 @@ { "target": "com.amazonaws.datazone#ResourceNotFoundException" }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.datazone#ThrottlingException" }, @@ -4120,54 +4500,54 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a subscription grant in Amazon DataZone.

", + "smithy.api#documentation": "

Publishes a listing (a record of an asset at a given time) or removes a listing from the\n catalog.

", "smithy.api#http": { "code": 200, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/subscription-grants" + "uri": "/v2/domains/{domainIdentifier}/listings/change-set" } } }, - "com.amazonaws.datazone#CreateSubscriptionGrantInput": { + "com.amazonaws.datazone#CreateListingChangeSetInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription grant is created.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "environmentIdentifier": { - "target": "com.amazonaws.datazone#EnvironmentId", + "entityIdentifier": { + "target": "com.amazonaws.datazone#EntityIdentifier", "traits": { - "smithy.api#documentation": "

The ID of the environment in which the subscription grant is created.

", + "smithy.api#documentation": "

The ID of the asset.

", "smithy.api#required": {} } }, - "subscriptionTargetIdentifier": { - "target": "com.amazonaws.datazone#SubscriptionTargetId", + "entityType": { + "target": "com.amazonaws.datazone#EntityType", "traits": { - "smithy.api#documentation": "

The ID of the subscription target for which the subscription grant is created.

", + "smithy.api#documentation": "

The type of an entity.

", "smithy.api#required": {} } }, - "grantedEntity": { - "target": "com.amazonaws.datazone#GrantedEntityInput", + "entityRevision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "

The entity to which the subscription is to be granted.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The revision of an asset.

" } }, - "assetTargetNames": { - "target": "com.amazonaws.datazone#AssetTargetNames", + "action": { + "target": "com.amazonaws.datazone#ChangeAction", "traits": { - "smithy.api#documentation": "

The names of the assets for which the subscription grant is created.

" + "smithy.api#documentation": "

Specifies whether to publish or unpublish a listing.

", + "smithy.api#required": {} } }, "clientToken": { - "target": "smithy.api#String", + "target": "com.amazonaws.datazone#ClientToken", "traits": { "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", "smithy.api#idempotencyToken": {} @@ -4178,95 +4558,42 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateSubscriptionGrantOutput": { + "com.amazonaws.datazone#CreateListingChangeSetOutput": { "type": "structure", "members": { - "id": { - "target": "com.amazonaws.datazone#SubscriptionGrantId", - "traits": { - "smithy.api#documentation": "

The ID of the subscription grant.

", - "smithy.api#required": {} - } - }, - "createdBy": { - "target": "com.amazonaws.datazone#CreatedBy", + "listingId": { + "target": "com.amazonaws.datazone#ListingId", "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who created the subscription grant.

", + "smithy.api#documentation": "

The ID of the listing (a record of an asset at a given time).

", "smithy.api#required": {} } }, - "updatedBy": { - "target": "com.amazonaws.datazone#UpdatedBy", - "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription grant.

" - } - }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription grant is created.

", + "smithy.api#documentation": "

The revision of a listing.

", "smithy.api#required": {} } }, - "createdAt": { - "target": "com.amazonaws.datazone#CreatedAt", + "status": { + "target": "com.amazonaws.datazone#ListingStatus", "traits": { - "smithy.api#documentation": "

A timestamp of when the subscription grant is created.

", + "smithy.api#documentation": "

Specifies the status of the listing.

", "smithy.api#required": {} } - }, - "updatedAt": { - "target": "com.amazonaws.datazone#UpdatedAt", - "traits": { - "smithy.api#documentation": "

A timestamp of when the subscription grant was updated.

", - "smithy.api#required": {} - } - }, - "subscriptionTargetId": { - "target": "com.amazonaws.datazone#SubscriptionTargetId", - "traits": { - "smithy.api#documentation": "

The ID of the subscription target for which the subscription grant is created.

", - "smithy.api#required": {} - } - }, - "grantedEntity": { - "target": "com.amazonaws.datazone#GrantedEntity", - "traits": { - "smithy.api#documentation": "

The entity to which the subscription is granted.

", - "smithy.api#required": {} - } - }, - "status": { - "target": "com.amazonaws.datazone#SubscriptionGrantOverallStatus", - "traits": { - "smithy.api#documentation": "

The status of the subscription grant.

", - "smithy.api#required": {} - } - }, - "assets": { - "target": "com.amazonaws.datazone#SubscribedAssets", - "traits": { - "smithy.api#documentation": "

The assets for which the subscription grant is created.

" - } - }, - "subscriptionId": { - "target": "com.amazonaws.datazone#SubscriptionId", - "traits": { - "smithy.api#documentation": "

The identifier of the subscription grant.

" - } } }, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateSubscriptionRequest": { + "com.amazonaws.datazone#CreateProject": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateSubscriptionRequestInput" + "target": "com.amazonaws.datazone#CreateProjectInput" }, "output": { - "target": "com.amazonaws.datazone#CreateSubscriptionRequestOutput" + "target": "com.amazonaws.datazone#CreateProjectOutput" }, "errors": [ { @@ -4281,6 +4608,9 @@ { "target": "com.amazonaws.datazone#ResourceNotFoundException" }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.datazone#ThrottlingException" }, @@ -4289,51 +4619,42 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a subscription request in Amazon DataZone.

", + "smithy.api#documentation": "

Creates an Amazon DataZone project.

", "smithy.api#http": { - "code": 200, + "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/subscription-requests" + "uri": "/v2/domains/{domainIdentifier}/projects" } } }, - "com.amazonaws.datazone#CreateSubscriptionRequestInput": { + "com.amazonaws.datazone#CreateProjectInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription request is created.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this project is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "subscribedPrincipals": { - "target": "com.amazonaws.datazone#SubscribedPrincipalInputs", - "traits": { - "smithy.api#documentation": "

The Amazon DataZone principals for whom the subscription request is created.

", - "smithy.api#required": {} - } - }, - "subscribedListings": { - "target": "com.amazonaws.datazone#SubscribedListingInputs", + "name": { + "target": "com.amazonaws.datazone#ProjectName", "traits": { - "smithy.api#documentation": "

The published asset for which the subscription grant is to be created.

", + "smithy.api#documentation": "

The name of the Amazon DataZone project.

", "smithy.api#required": {} } }, - "requestReason": { - "target": "com.amazonaws.datazone#RequestReason", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "

The reason for the subscription request.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The description of the Amazon DataZone project.

" } }, - "clientToken": { - "target": "smithy.api#String", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", - "smithy.api#idempotencyToken": {} + "smithy.api#documentation": "

The glossary terms that can be used in this Amazon DataZone project.

" } } }, @@ -4341,96 +4662,149 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateSubscriptionRequestOutput": { + "com.amazonaws.datazone#CreateProjectMembership": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateProjectMembershipInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateProjectMembershipOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a project membership in Amazon DataZone.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/createMembership" + } + } + }, + "com.amazonaws.datazone#CreateProjectMembershipInput": { "type": "structure", "members": { - "id": { - "target": "com.amazonaws.datazone#SubscriptionRequestId", + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the subscription request.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which project membership is created.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "createdBy": { - "target": "com.amazonaws.datazone#CreatedBy", + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who created the subscription request.

", + "smithy.api#documentation": "

The ID of the project for which this project membership was created.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "updatedBy": { - "target": "com.amazonaws.datazone#UpdatedBy", + "member": { + "target": "com.amazonaws.datazone#Member", "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription request.

" + "smithy.api#documentation": "

The project member whose project membership was created.

", + "smithy.api#required": {} } }, + "designation": { + "target": "com.amazonaws.datazone#UserDesignation", + "traits": { + "smithy.api#documentation": "

The designation of the project membership.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateProjectMembershipOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateProjectOutput": { + "type": "structure", + "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription request is created.

", + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the project was created.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#SubscriptionRequestStatus", + "id": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The status of the subscription request.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone project.

", "smithy.api#required": {} } }, - "createdAt": { - "target": "com.amazonaws.datazone#CreatedAt", + "name": { + "target": "com.amazonaws.datazone#ProjectName", "traits": { - "smithy.api#documentation": "

A timestamp of when the subscription request is created.

", + "smithy.api#documentation": "

The name of the project.

", "smithy.api#required": {} } }, - "updatedAt": { - "target": "com.amazonaws.datazone#UpdatedAt", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "

The timestamp of when the subscription request was updated.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The description of the project.

" } }, - "requestReason": { - "target": "com.amazonaws.datazone#RequestReason", + "projectStatus": { + "target": "com.amazonaws.datazone#ProjectStatus", "traits": { - "smithy.api#documentation": "

The reason for the subscription request.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The status of the Amazon DataZone project that was created.

" } }, - "subscribedPrincipals": { - "target": "com.amazonaws.datazone#SubscribedPrincipals", + "failureReasons": { + "target": "com.amazonaws.datazone#FailureReasons", "traits": { - "smithy.api#documentation": "

The subscribed principals of the subscription request.

", - "smithy.api#length": { - "min": 1, - "max": 1 - }, - "smithy.api#required": {} + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" } }, - "subscribedListings": { - "target": "com.amazonaws.datazone#SubscribedListings", + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "

The published asset for which the subscription grant is to be created.

", - "smithy.api#length": { - "min": 1, - "max": 1 - }, + "smithy.api#documentation": "

The Amazon DataZone user who created the project.

", "smithy.api#required": {} } }, - "reviewerId": { - "target": "smithy.api#String", + "createdAt": { + "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The ID of the reviewer of the subscription request.

" + "smithy.api#documentation": "

The timestamp of when the project was created.

", + "smithy.api#timestampFormat": "date-time" } }, - "decisionComment": { - "target": "com.amazonaws.datazone#DecisionComment", + "lastUpdatedAt": { + "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The decision comment of the subscription request.

" + "smithy.api#documentation": "

The timestamp of when the project was last updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms that can be used in the project.

" } } }, @@ -4438,13 +4812,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateSubscriptionTarget": { + "com.amazonaws.datazone#CreateSubscriptionGrant": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateSubscriptionTargetInput" + "target": "com.amazonaws.datazone#CreateSubscriptionGrantInput" }, "output": { - "target": "com.amazonaws.datazone#CreateSubscriptionTargetOutput" + "target": "com.amazonaws.datazone#CreateSubscriptionGrantOutput" }, "errors": [ { @@ -4467,21 +4841,21 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a subscription target in Amazon DataZone.

", + "smithy.api#documentation": "

Creates a subscription grant in Amazon DataZone.

", "smithy.api#http": { "code": 200, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets" + "uri": "/v2/domains/{domainIdentifier}/subscription-grants" } } }, - "com.amazonaws.datazone#CreateSubscriptionTargetInput": { + "com.amazonaws.datazone#CreateSubscriptionGrantInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which subscription target is created.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription grant is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4489,57 +4863,28 @@ "environmentIdentifier": { "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "

The ID of the environment in which subscription target is created.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The ID of the environment in which the subscription grant is created.

", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#SubscriptionTargetName", + "subscriptionTargetIdentifier": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", "traits": { - "smithy.api#documentation": "

The name of the subscription target.

", + "smithy.api#documentation": "

The ID of the subscription target for which the subscription grant is created.

", "smithy.api#required": {} } }, - "type": { - "target": "smithy.api#String", + "grantedEntity": { + "target": "com.amazonaws.datazone#GrantedEntityInput", "traits": { - "smithy.api#documentation": "

The type of the subscription target.

", - "smithy.api#required": {} - } - }, - "subscriptionTargetConfig": { - "target": "com.amazonaws.datazone#SubscriptionTargetForms", - "traits": { - "smithy.api#documentation": "

The configuration of the subscription target.

", - "smithy.api#required": {} - } - }, - "authorizedPrincipals": { - "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", - "traits": { - "smithy.api#documentation": "

The authorized principals of the subscription target.

", - "smithy.api#required": {} - } - }, - "manageAccessRole": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "

The manage access role that is used to create the subscription target.

", - "smithy.api#required": {} - } - }, - "applicableAssetTypes": { - "target": "com.amazonaws.datazone#ApplicableAssetTypes", - "traits": { - "smithy.api#documentation": "

The asset types that can be included in the subscription target.

", + "smithy.api#documentation": "

The entity to which the subscription is to be granted.

", "smithy.api#required": {} } }, - "provider": { - "target": "smithy.api#String", + "assetTargetNames": { + "target": "com.amazonaws.datazone#AssetTargetNames", "traits": { - "smithy.api#documentation": "

The provider of the subscription target.

" + "smithy.api#documentation": "

The names of the assets for which the subscription grant is created.

" } }, "clientToken": { @@ -4554,110 +4899,84 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateSubscriptionTargetOutput": { + "com.amazonaws.datazone#CreateSubscriptionGrantOutput": { "type": "structure", "members": { "id": { - "target": "com.amazonaws.datazone#SubscriptionTargetId", - "traits": { - "smithy.api#documentation": "

The ID of the subscription target.

", - "smithy.api#required": {} - } - }, - "authorizedPrincipals": { - "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", - "traits": { - "smithy.api#documentation": "

The authorized principals of the subscription target.

", - "smithy.api#required": {} - } - }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", - "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription target was created.

", - "smithy.api#required": {} - } - }, - "projectId": { - "target": "com.amazonaws.datazone#ProjectId", - "traits": { - "smithy.api#documentation": "

The ID of the project associated with the subscription target.

", - "smithy.api#required": {} - } - }, - "environmentId": { - "target": "com.amazonaws.datazone#EnvironmentId", - "traits": { - "smithy.api#documentation": "

The ID of the environment in which the subscription target was created.

", - "smithy.api#required": {} - } - }, - "name": { - "target": "com.amazonaws.datazone#SubscriptionTargetName", - "traits": { - "smithy.api#documentation": "

The name of the subscription target.

", - "smithy.api#required": {} - } - }, - "type": { - "target": "smithy.api#String", + "target": "com.amazonaws.datazone#SubscriptionGrantId", "traits": { - "smithy.api#documentation": "

The type of the subscription target.

", + "smithy.api#documentation": "

The ID of the subscription grant.

", "smithy.api#required": {} } }, "createdBy": { "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who created the subscription target.

", + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription grant.

", "smithy.api#required": {} } }, "updatedBy": { "target": "com.amazonaws.datazone#UpdatedBy", "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription target.

" + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription grant.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription grant is created.

", + "smithy.api#required": {} } }, "createdAt": { "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "

The timestamp of when the subscription target was created.

", + "smithy.api#documentation": "

A timestamp of when the subscription grant is created.

", "smithy.api#required": {} } }, "updatedAt": { "target": "com.amazonaws.datazone#UpdatedAt", "traits": { - "smithy.api#documentation": "

The timestamp of when the subscription target was updated.

" + "smithy.api#documentation": "

A timestamp of when the subscription grant was updated.

", + "smithy.api#required": {} } }, - "manageAccessRole": { - "target": "smithy.api#String", + "subscriptionTargetId": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", "traits": { - "smithy.api#documentation": "

The manage access role with which the subscription target was created.

", + "smithy.api#documentation": "

The ID of the subscription target for which the subscription grant is created.

", "smithy.api#required": {} } }, - "applicableAssetTypes": { - "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "grantedEntity": { + "target": "com.amazonaws.datazone#GrantedEntity", "traits": { - "smithy.api#documentation": "

The asset types that can be included in the subscription target.

", + "smithy.api#documentation": "

The entity to which the subscription is granted.

", "smithy.api#required": {} } }, - "subscriptionTargetConfig": { - "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "status": { + "target": "com.amazonaws.datazone#SubscriptionGrantOverallStatus", "traits": { - "smithy.api#documentation": "

The configuration of the subscription target.

", + "smithy.api#documentation": "

The status of the subscription grant.

", "smithy.api#required": {} } }, - "provider": { - "target": "smithy.api#String", + "assets": { + "target": "com.amazonaws.datazone#SubscribedAssets", "traits": { - "smithy.api#documentation": "

The provider of the subscription target.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The assets for which the subscription grant is created.

" + } + }, + "subscriptionId": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#deprecated": { + "message": "Multiple subscriptions can exist for a single grant" + }, + "smithy.api#documentation": "

The identifier of the subscription grant.

" } } }, @@ -4665,63 +4984,73 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateUserProfile": { + "com.amazonaws.datazone#CreateSubscriptionRequest": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateUserProfileInput" + "target": "com.amazonaws.datazone#CreateSubscriptionRequestInput" }, "output": { - "target": "com.amazonaws.datazone#CreateUserProfileOutput" + "target": "com.amazonaws.datazone#CreateSubscriptionRequestOutput" }, "errors": [ { "target": "com.amazonaws.datazone#AccessDeniedException" }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, { "target": "com.amazonaws.datazone#InternalServerException" }, { "target": "com.amazonaws.datazone#ResourceNotFoundException" }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, { "target": "com.amazonaws.datazone#ValidationException" } ], "traits": { - "smithy.api#documentation": "

Creates a user profile in Amazon DataZone.

", + "smithy.api#documentation": "

Creates a subscription request in Amazon DataZone.

", "smithy.api#http": { - "code": 201, + "code": 200, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/user-profiles" - }, - "smithy.api#idempotent": {}, - "smithy.api#tags": [ - "Administration" - ] + "uri": "/v2/domains/{domainIdentifier}/subscription-requests" + } } }, - "com.amazonaws.datazone#CreateUserProfileInput": { + "com.amazonaws.datazone#CreateSubscriptionRequestInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a user profile is created.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription request is created.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "userIdentifier": { - "target": "com.amazonaws.datazone#UserIdentifier", + "subscribedPrincipals": { + "target": "com.amazonaws.datazone#SubscribedPrincipalInputs", "traits": { - "smithy.api#documentation": "

The identifier of the user for which the user profile is created.

", + "smithy.api#documentation": "

The Amazon DataZone principals for whom the subscription request is created.

", "smithy.api#required": {} } }, - "userType": { - "target": "com.amazonaws.datazone#UserType", + "subscribedListings": { + "target": "com.amazonaws.datazone#SubscribedListingInputs", "traits": { - "smithy.api#documentation": "

The user type of the user for which the user profile is created.

" + "smithy.api#documentation": "

The published asset for which the subscription grant is to be created.

", + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "

The reason for the subscription request.

", + "smithy.api#required": {} } }, "clientToken": { @@ -4736,221 +5065,859 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateUserProfileOutput": { + "com.amazonaws.datazone#CreateSubscriptionRequestOutput": { "type": "structure", "members": { - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "id": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a user profile is created.

" + "smithy.api#documentation": "

The ID of the subscription request.

", + "smithy.api#required": {} } }, - "id": { - "target": "com.amazonaws.datazone#UserProfileId", + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "

The identifier of the user profile.

" + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription request.

", + "smithy.api#required": {} } }, - "type": { - "target": "com.amazonaws.datazone#UserProfileType", + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", "traits": { - "smithy.api#documentation": "

The type of the user profile.

" + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription request.

" } }, - "status": { - "target": "com.amazonaws.datazone#UserProfileStatus", + "domainId": { + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The status of the user profile.

" + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription request is created.

", + "smithy.api#required": {} } }, - "details": { - "target": "com.amazonaws.datazone#UserProfileDetails" - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.datazone#CreatedAt": { - "type": "timestamp" - }, - "com.amazonaws.datazone#CreatedBy": { - "type": "string" - }, - "com.amazonaws.datazone#CronString": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 256 - }, - "smithy.api#pattern": "cron\\((\\b[0-5]?[0-9]\\b) (\\b2[0-3]\\b|\\b[0-1]?[0-9]\\b) (.*){1,5} (.*){1,5} (.*){1,5} (.*){1,5}\\)" - } - }, - "com.amazonaws.datazone#CustomParameter": { - "type": "structure", - "members": { - "keyName": { - "target": "smithy.api#String", + "status": { + "target": "com.amazonaws.datazone#SubscriptionRequestStatus", "traits": { - "smithy.api#documentation": "

The key name of the parameter.

", - "smithy.api#pattern": "^[a-zA-Z_][a-zA-Z0-9_]*$", + "smithy.api#documentation": "

The status of the subscription request.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#Description", + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "

The description of the parameter.

" + "smithy.api#documentation": "

A timestamp of when the subscription request is created.

", + "smithy.api#required": {} } }, - "fieldType": { - "target": "smithy.api#String", + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", "traits": { - "smithy.api#documentation": "

The field type of the parameter.

", + "smithy.api#documentation": "

The timestamp of when the subscription request was updated.

", + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "

The reason for the subscription request.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipals": { + "target": "com.amazonaws.datazone#SubscribedPrincipals", + "traits": { + "smithy.api#documentation": "

The subscribed principals of the subscription request.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "subscribedListings": { + "target": "com.amazonaws.datazone#SubscribedListings", + "traits": { + "smithy.api#documentation": "

The published asset for which the subscription grant is to be created.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "reviewerId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the reviewer of the subscription request.

" + } + }, + "decisionComment": { + "target": "com.amazonaws.datazone#DecisionComment", + "traits": { + "smithy.api#documentation": "

The decision comment of the subscription request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateSubscriptionTarget": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateSubscriptionTargetInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateSubscriptionTargetOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a subscription target in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets" + } + } + }, + "com.amazonaws.datazone#CreateSubscriptionTargetInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which subscription target is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment in which subscription target is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#SubscriptionTargetName", + "traits": { + "smithy.api#documentation": "

The name of the subscription target.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the subscription target.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetConfig": { + "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "traits": { + "smithy.api#documentation": "

The configuration of the subscription target.

", + "smithy.api#required": {} + } + }, + "authorizedPrincipals": { + "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", + "traits": { + "smithy.api#documentation": "

The authorized principals of the subscription target.

", + "smithy.api#required": {} + } + }, + "manageAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The manage access role that is used to create the subscription target.

", + "smithy.api#required": {} + } + }, + "applicableAssetTypes": { + "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "traits": { + "smithy.api#documentation": "

The asset types that can be included in the subscription target.

", + "smithy.api#required": {} + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of the subscription target.

" + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateSubscriptionTargetOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription target.

", + "smithy.api#required": {} + } + }, + "authorizedPrincipals": { + "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", + "traits": { + "smithy.api#documentation": "

The authorized principals of the subscription target.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription target was created.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project in which the subscription target was created.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment in which the subscription target was created.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#SubscriptionTargetName", + "traits": { + "smithy.api#documentation": "

The name of the subscription target.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the subscription target.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription target.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription target.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription target was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription target was updated.

" + } + }, + "manageAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The manage access role with which the subscription target was created.

", + "smithy.api#required": {} + } + }, + "applicableAssetTypes": { + "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "traits": { + "smithy.api#documentation": "

The asset types that can be included in the subscription target.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetConfig": { + "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "traits": { + "smithy.api#documentation": "

The configuration of the subscription target.

", + "smithy.api#required": {} + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of the subscription target.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateUserProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateUserProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateUserProfileOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a user profile in Amazon DataZone.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/user-profiles" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#CreateUserProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a user profile is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "userIdentifier": { + "target": "com.amazonaws.datazone#UserIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the user for which the user profile is created.

", + "smithy.api#required": {} + } + }, + "userType": { + "target": "com.amazonaws.datazone#UserType", + "traits": { + "smithy.api#documentation": "

The user type of the user for which the user profile is created.

" + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateUserProfileOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a user profile is created.

" + } + }, + "id": { + "target": "com.amazonaws.datazone#UserProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the user profile.

" + } + }, + "type": { + "target": "com.amazonaws.datazone#UserProfileType", + "traits": { + "smithy.api#documentation": "

The type of the user profile.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#UserProfileStatus", + "traits": { + "smithy.api#documentation": "

The status of the user profile.

" + } + }, + "details": { + "target": "com.amazonaws.datazone#UserProfileDetails" + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreatedAt": { + "type": "timestamp" + }, + "com.amazonaws.datazone#CreatedBy": { + "type": "string" + }, + "com.amazonaws.datazone#CronString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "cron\\((\\b[0-5]?[0-9]\\b) (\\b2[0-3]\\b|\\b[0-1]?[0-9]\\b) (.*){1,5} (.*){1,5} (.*){1,5} (.*){1,5}\\)" + } + }, + "com.amazonaws.datazone#CustomParameter": { + "type": "structure", + "members": { + "keyName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The key name of the parameter.

", + "smithy.api#pattern": "^[a-zA-Z_][a-zA-Z0-9_]*$", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the parameter.

" + } + }, + "fieldType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The field type of the parameter.

", "smithy.api#required": {} } }, "defaultValue": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The default value of the parameter.

" + "smithy.api#documentation": "

The default value of the parameter.

" + } + }, + "isEditable": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the parameter is editable.

" + } + }, + "isOptional": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the custom parameter is optional.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of user parameters of an environment blueprint.

" + } + }, + "com.amazonaws.datazone#CustomParameterList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#CustomParameter" + } + }, + "com.amazonaws.datazone#DataAssetActivityStatus": { + "type": "enum", + "members": { + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "PUBLISHING_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PUBLISHING_FAILED" + } + }, + "SUCCEEDED_CREATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED_CREATED" + } + }, + "SUCCEEDED_UPDATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED_UPDATED" + } + }, + "SKIPPED_ALREADY_IMPORTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIPPED_ALREADY_IMPORTED" + } + }, + "SKIPPED_ARCHIVED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIPPED_ARCHIVED" + } + }, + "SKIPPED_NO_ACCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIPPED_NO_ACCESS" + } + }, + "UNCHANGED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNCHANGED" + } + } + } + }, + "com.amazonaws.datazone#DataPointIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{0,36}$" + } + }, + "com.amazonaws.datazone#DataProduct": { + "type": "resource", + "identifiers": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId" + }, + "identifier": { + "target": "com.amazonaws.datazone#DataProductId" + } + }, + "properties": { + "id": { + "target": "com.amazonaws.datazone#DataProductId" + }, + "revision": { + "target": "com.amazonaws.datazone#Revision" + }, + "name": { + "target": "com.amazonaws.datazone#DataProductName" + }, + "description": { + "target": "com.amazonaws.datazone#DataProductDescription" + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "glossaryTerms": { + 
"target": "com.amazonaws.datazone#GlossaryTerms" + }, + "items": { + "target": "com.amazonaws.datazone#DataProductItems" + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "formsInput": { + "target": "com.amazonaws.datazone#FormInputList" + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList" + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt" + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy" + }, + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt" + }, + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy" + }, + "status": { + "target": "com.amazonaws.datazone#DataProductStatus" + } + }, + "create": { + "target": "com.amazonaws.datazone#CreateDataProduct" + }, + "read": { + "target": "com.amazonaws.datazone#GetDataProduct" + }, + "delete": { + "target": "com.amazonaws.datazone#DeleteDataProduct" + }, + "operations": [ + { + "target": "com.amazonaws.datazone#CreateDataProductRevision" + } + ] + }, + "com.amazonaws.datazone#DataProductDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#DataProductId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#DataProductItem": { + "type": "structure", + "members": { + "itemType": { + "target": "com.amazonaws.datazone#DataProductItemType", + "traits": { + "smithy.api#documentation": "

The type of the data product.

", + "smithy.api#required": {} } }, - "isEditable": { - "target": "smithy.api#Boolean", + "identifier": { + "target": "com.amazonaws.datazone#EntityIdentifier", "traits": { - "smithy.api#documentation": "

Specifies whether the parameter is editable.

" + "smithy.api#documentation": "

The ID of the data product.

", + "smithy.api#required": {} } }, - "isOptional": { - "target": "smithy.api#Boolean", + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "

Specifies whether the custom parameter is optional.

" + "smithy.api#documentation": "

The revision of the data product.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#ItemGlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms of the data product.

" } } }, "traits": { - "smithy.api#documentation": "

The details of user parameters of an environment blueprint.

" - } - }, - "com.amazonaws.datazone#CustomParameterList": { - "type": "list", - "member": { - "target": "com.amazonaws.datazone#CustomParameter" + "smithy.api#documentation": "

The data product.

" } }, - "com.amazonaws.datazone#DataAssetActivityStatus": { + "com.amazonaws.datazone#DataProductItemType": { "type": "enum", "members": { - "FAILED": { + "ASSET": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "FAILED" + "smithy.api#enumValue": "ASSET" } - }, - "PUBLISHING_FAILED": { - "target": "smithy.api#Unit", + } + } + }, + "com.amazonaws.datazone#DataProductItems": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#DataProductItem" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.datazone#DataProductListing": { + "type": "structure", + "members": { + "dataProductId": { + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#enumValue": "PUBLISHING_FAILED" + "smithy.api#documentation": "

The ID of the data product listing.

" } }, - "SUCCEEDED_CREATED": { - "target": "smithy.api#Unit", + "dataProductRevision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#enumValue": "SUCCEEDED_CREATED" + "smithy.api#documentation": "

The revision of the data product listing.

" } }, - "SUCCEEDED_UPDATED": { - "target": "smithy.api#Unit", + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#enumValue": "SUCCEEDED_UPDATED" + "smithy.api#documentation": "

The timestamp at which the data product listing was created.

" } }, - "SKIPPED_ALREADY_IMPORTED": { - "target": "smithy.api#Unit", + "forms": { + "target": "com.amazonaws.datazone#Forms", "traits": { - "smithy.api#enumValue": "SKIPPED_ALREADY_IMPORTED" + "smithy.api#documentation": "

The metadata forms of the data product listing.

" } }, - "SKIPPED_ARCHIVED": { - "target": "smithy.api#Unit", + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", "traits": { - "smithy.api#enumValue": "SKIPPED_ARCHIVED" + "smithy.api#documentation": "

The glossary terms of the data product listing.

" } }, - "SKIPPED_NO_ACCESS": { - "target": "smithy.api#Unit", + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#enumValue": "SKIPPED_NO_ACCESS" + "smithy.api#documentation": "

The ID of the owning project of the data product listing.

" } }, - "UNCHANGED": { - "target": "smithy.api#Unit", + "items": { + "target": "com.amazonaws.datazone#ListingSummaries", "traits": { - "smithy.api#enumValue": "UNCHANGED" + "smithy.api#documentation": "

The data assets of the data product listing.

" } } - } - }, - "com.amazonaws.datazone#DataPointIdentifier": { - "type": "string", - "traits": { - "smithy.api#pattern": "^[a-zA-Z0-9_-]{0,36}$" - } - }, - "com.amazonaws.datazone#DataProductDescription": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 4096 - }, - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.datazone#DataProductId": { - "type": "string", + }, "traits": { - "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + "smithy.api#documentation": "

The data product listing.

" } }, - "com.amazonaws.datazone#DataProductItem": { + "com.amazonaws.datazone#DataProductListingItem": { "type": "structure", "members": { - "itemId": { + "listingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

The ID of the listing.

" + } + }, + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the listing.

" + } + }, + "name": { + "target": "com.amazonaws.datazone#DataProductName", + "traits": { + "smithy.api#documentation": "

The name of the asset of the data product.

" + } + }, + "entityId": { "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "

" + "smithy.api#documentation": "

The entity ID of the asset of the data product.

" } }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "entityRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the asset of the data product.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the asset of the data product.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp at which the asset of the data product listing was created.

" + } + }, + "listingCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The user who created the listing.

" + } + }, + "listingUpdatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The user who updated the listing.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms of the asset of the data product.

" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the owning project of the asset of the data product.

" + } + }, + "additionalAttributes": { + "target": "com.amazonaws.datazone#DataProductListingItemAdditionalAttributes", + "traits": { + "smithy.api#documentation": "

The additional attributes of the asset of the data product.

" + } + }, + "items": { + "target": "com.amazonaws.datazone#ListingSummaryItems", "traits": { - "smithy.api#documentation": "

" + "smithy.api#documentation": "

The data of the asset of the data product.

" } } }, "traits": { - "smithy.api#deprecated": { - "message": "This structure is deprecated." - }, - "smithy.api#documentation": "

" + "smithy.api#documentation": "

The asset of the data product listing.

" } }, - "com.amazonaws.datazone#DataProductItems": { - "type": "list", - "member": { - "target": "com.amazonaws.datazone#DataProductItem" + "com.amazonaws.datazone#DataProductListingItemAdditionalAttributes": { + "type": "structure", + "members": { + "forms": { + "target": "com.amazonaws.datazone#Forms", + "traits": { + "smithy.api#documentation": "

The metadata forms of the asset of the data product.

" + } + } }, "traits": { - "smithy.api#deprecated": { - "message": "This structure is deprecated." - }, - "smithy.api#length": { - "min": 0, - "max": 100 - } + "smithy.api#documentation": "

The additional attributes of the asset of the data product.

" } }, "com.amazonaws.datazone#DataProductName": { @@ -4963,85 +5930,143 @@ "smithy.api#sensitive": {} } }, - "com.amazonaws.datazone#DataProductSummary": { + "com.amazonaws.datazone#DataProductResultItem": { "type": "structure", "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

", + "smithy.api#documentation": "

The ID of the domain where the data product lives.

", "smithy.api#required": {} } }, "id": { "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "

", + "smithy.api#documentation": "

The ID of the data product.

", "smithy.api#required": {} } }, "name": { "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "

", + "smithy.api#documentation": "

The name of the data product.

", "smithy.api#required": {} } }, "owningProjectId": { "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

", + "smithy.api#documentation": "

The ID of the owning project of the data product.

", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "

" + "smithy.api#documentation": "

The description of the data product.

" } }, "glossaryTerms": { "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "

" + "smithy.api#documentation": "

The glossary terms of the data product.

" } }, - "dataProductItems": { - "target": "com.amazonaws.datazone#DataProductItems", + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp at which the data product was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The user who created the data product.

" + } + }, + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp at which the first revision of the data product was created.

" + } + }, + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The user who created the first revision of the data product.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The data product.

" + } + }, + "com.amazonaws.datazone#DataProductRevision": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain where the data product revision lives.

" + } + }, + "id": { + "target": "com.amazonaws.datazone#DataProductId", + "traits": { + "smithy.api#documentation": "

The ID of the data product revision.

" + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "

" + "smithy.api#documentation": "

The data product revision.

" } }, "createdAt": { "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "

" + "smithy.api#documentation": "

The timestamp at which the data product revision was created.

" } }, "createdBy": { "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "

" + "smithy.api#documentation": "

The user who created the data product revision.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The data product revision.

" + } + }, + "com.amazonaws.datazone#DataProductRevisions": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#DataProductRevision" + } + }, + "com.amazonaws.datazone#DataProductStatus": { + "type": "enum", + "members": { + "CREATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATED" } }, - "updatedAt": { - "target": "com.amazonaws.datazone#UpdatedAt", + "CREATING": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

" + "smithy.api#enumValue": "CREATING" } }, - "updatedBy": { - "target": "com.amazonaws.datazone#UpdatedBy", + "CREATE_FAILED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

" + "smithy.api#enumValue": "CREATE_FAILED" } } - }, - "traits": { - "smithy.api#deprecated": { - "message": "This structure is deprecated." - }, - "smithy.api#documentation": "

" } }, "com.amazonaws.datazone#DataSource": { @@ -5715,6 +6740,9 @@ { "target": "com.amazonaws.datazone#CancelSubscription" }, + { + "target": "com.amazonaws.datazone#CreateAssetFilter" + }, { "target": "com.amazonaws.datazone#CreateEnvironment" }, @@ -5748,6 +6776,9 @@ { "target": "com.amazonaws.datazone#CreateUserProfile" }, + { + "target": "com.amazonaws.datazone#DeleteAssetFilter" + }, { "target": "com.amazonaws.datazone#DeleteEnvironment" }, @@ -5778,6 +6809,9 @@ { "target": "com.amazonaws.datazone#DisassociateEnvironmentRole" }, + { + "target": "com.amazonaws.datazone#GetAssetFilter" + }, { "target": "com.amazonaws.datazone#GetEnvironment" }, @@ -5787,6 +6821,9 @@ { "target": "com.amazonaws.datazone#GetEnvironmentBlueprint" }, + { + "target": "com.amazonaws.datazone#GetEnvironmentCredentials" + }, { "target": "com.amazonaws.datazone#GetEnvironmentProfile" }, @@ -5820,9 +6857,15 @@ { "target": "com.amazonaws.datazone#GetUserProfile" }, + { + "target": "com.amazonaws.datazone#ListAssetFilters" + }, { "target": "com.amazonaws.datazone#ListAssetRevisions" }, + { + "target": "com.amazonaws.datazone#ListDataProductRevisions" + }, { "target": "com.amazonaws.datazone#ListDataSourceRunActivities" }, @@ -5904,6 +6947,9 @@ { "target": "com.amazonaws.datazone#UntagResource" }, + { + "target": "com.amazonaws.datazone#UpdateAssetFilter" + }, { "target": "com.amazonaws.datazone#UpdateEnvironment" }, @@ -5939,6 +6985,9 @@ { "target": "com.amazonaws.datazone#AssetType" }, + { + "target": "com.amazonaws.datazone#DataProduct" + }, { "target": "com.amazonaws.datazone#DataSource" }, @@ -6419,6 +7468,9 @@ { "target": "com.amazonaws.datazone#AccessDeniedException" }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, { "target": "com.amazonaws.datazone#InternalServerException" }, @@ -6433,7 +7485,7 @@ } ], "traits": { - "smithy.api#documentation": "

Delets an asset in Amazon DataZone.

", + "smithy.api#documentation": "

Deletes an asset in Amazon DataZone.

", "smithy.api#http": { "code": 204, "method": "DELETE", @@ -6442,6 +7494,76 @@ "smithy.api#idempotent": {} } }, + "com.amazonaws.datazone#DeleteAssetFilter": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteAssetFilterInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an asset filter.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteAssetFilterInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain where you want to delete an asset filter.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "assetIdentifier": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The ID of the data asset.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "

The ID of the asset filter that you want to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.datazone#DeleteAssetInput": { "type": "structure", "members": { @@ -6541,6 +7663,75 @@ "smithy.api#output": {} } }, + "com.amazonaws.datazone#DeleteDataProduct": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteDataProductInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteDataProductOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a data product in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/data-products/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteDataProductInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the data product is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#DataProductId", + "traits": { + "smithy.api#documentation": "

The identifier of the data product that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteDataProductOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#DeleteDataSource": { "type": "operation", "input": { @@ -7661,6 +8852,9 @@ "subscriptionId": { "target": "com.amazonaws.datazone#SubscriptionId", "traits": { + "smithy.api#deprecated": { + "message": "Multiple subscriptions can exist for a single grant" + }, "smithy.api#documentation": "

The identifier of the subsctiption whose subscription grant is to be deleted.

" } } @@ -8402,6 +9596,12 @@ "traits": { "smithy.api#enumValue": "ASSET" } + }, + "DATA_PRODUCT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATA_PRODUCT" + } } } }, @@ -8569,6 +9769,12 @@ "smithy.api#documentation": "

The timestamp of when the environment blueprint was updated.

", "smithy.api#timestampFormat": "date-time" } + }, + "provisioningConfigurations": { + "target": "com.amazonaws.datazone#ProvisioningConfigurationList", + "traits": { + "smithy.api#documentation": "

The provisioning configuration of a blueprint.

" + } } }, "traits": { @@ -9011,6 +10217,28 @@ "smithy.api#documentation": "

The details of an environment.

" } }, + "com.amazonaws.datazone#EqualToExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the column.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value that might be equal to an expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies whether the value is equal to an expression.

" + } + }, "com.amazonaws.datazone#ErrorMessage": { "type": "string" }, @@ -9145,6 +10373,12 @@ "target": "com.amazonaws.datazone#FilterExpression" } }, + "com.amazonaws.datazone#FilterId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, "com.amazonaws.datazone#FilterList": { "type": "list", "member": { @@ -9157,6 +10391,34 @@ } } }, + "com.amazonaws.datazone#FilterName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[\\w -]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#FilterStatus": { + "type": "enum", + "members": { + "VALID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VALID" + } + }, + "INVALID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID" + } + } + } + }, "com.amazonaws.datazone#FirstName": { "type": "string", "traits": { @@ -9515,30 +10777,65 @@ "min": 0, "max": 10 } - } - }, - "com.amazonaws.datazone#FormsOutputMap": { - "type": "map", - "key": { - "target": "com.amazonaws.datazone#FormName" - }, - "value": { - "target": "com.amazonaws.datazone#FormEntryOutput" - }, + } + }, + "com.amazonaws.datazone#FormsOutputMap": { + "type": "map", + "key": { + "target": "com.amazonaws.datazone#FormName" + }, + "value": { + "target": "com.amazonaws.datazone#FormEntryOutput" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.datazone#GetAsset": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetAssetInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetAssetOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": 
"com.amazonaws.datazone#ValidationException" + } + ], "traits": { - "smithy.api#length": { - "min": 0, - "max": 10 - } + "smithy.api#documentation": "

Gets an Amazon DataZone asset.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/assets/{identifier}" + }, + "smithy.api#readonly": {} } }, - "com.amazonaws.datazone#GetAsset": { + "com.amazonaws.datazone#GetAssetFilter": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#GetAssetInput" + "target": "com.amazonaws.datazone#GetAssetFilterInput" }, "output": { - "target": "com.amazonaws.datazone#GetAssetOutput" + "target": "com.amazonaws.datazone#GetAssetFilterOutput" }, "errors": [ { @@ -9558,15 +10855,126 @@ } ], "traits": { - "smithy.api#documentation": "

Gets an Amazon DataZone asset.

", + "smithy.api#documentation": "

Gets an asset filter.

", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/v2/domains/{domainIdentifier}/assets/{identifier}" + "uri": "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}" }, "smithy.api#readonly": {} } }, + "com.amazonaws.datazone#GetAssetFilterInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain where you want to get an asset filter.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "assetIdentifier": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The ID of the data asset.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "

The ID of the asset filter.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetAssetFilterOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "

The ID of the asset filter.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain where you want to get an asset filter.

", + "smithy.api#required": {} + } + }, + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The ID of the data asset.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FilterName", + "traits": { + "smithy.api#documentation": "

The name of the asset filter.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the asset filter.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#FilterStatus", + "traits": { + "smithy.api#documentation": "

The status of the asset filter.

" + } + }, + "configuration": { + "target": "com.amazonaws.datazone#AssetFilterConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the asset filter.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp at which the asset filter was created.

" + } + }, + "errorMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The error message that is displayed if the action does not complete successfully.

" + } + }, + "effectiveColumnNames": { + "target": "com.amazonaws.datazone#ColumnNameList", + "traits": { + "smithy.api#documentation": "

The column names of the asset filter.

" + } + }, + "effectiveRowFilter": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The row filter of the asset filter.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#GetAssetInput": { "type": "structure", "members": { @@ -9657,64 +11065,215 @@ "createdBy": { "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who created the asset.

" + "smithy.api#documentation": "

The Amazon DataZone user who created the asset.

" + } + }, + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the first revision of the asset was created.

" + } + }, + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the first revision of the asset.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The business glossary terms attached to the asset.

" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project that owns the asset.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain to which the asset belongs.

", + "smithy.api#required": {} + } + }, + "listing": { + "target": "com.amazonaws.datazone#AssetListingDetails", + "traits": { + "smithy.api#documentation": "

The listing of the asset.

" + } + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The metadata forms attached to the asset.

", + "smithy.api#required": {} + } + }, + "readOnlyFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The read-only metadata forms attached to the asset.

" + } + }, + "latestTimeSeriesDataPointFormsOutput": { + "target": "com.amazonaws.datazone#TimeSeriesDataPointSummaryFormOutputList", + "traits": { + "smithy.api#documentation": "

The latest data point that was imported into the time series form for the asset.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetAssetType": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetAssetTypeInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetAssetTypeOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets an Amazon DataZone asset type.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/asset-types/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetAssetTypeInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset type exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The ID of the asset type.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the asset type.

", + "smithy.api#httpQuery": "revision" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetAssetTypeOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset type exists.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#TypeName", + "traits": { + "smithy.api#documentation": "

The name of the asset type.

", + "smithy.api#required": {} } }, - "firstRevisionCreatedAt": { - "target": "com.amazonaws.datazone#CreatedAt", + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "

The timestamp of when the first revision of the asset was created.

" + "smithy.api#documentation": "

The revision of the asset type.

", + "smithy.api#required": {} } }, - "firstRevisionCreatedBy": { - "target": "com.amazonaws.datazone#CreatedBy", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who created the first revision of the asset.

" + "smithy.api#documentation": "

The description of the asset type.

" } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "formsOutput": { + "target": "com.amazonaws.datazone#FormsOutputMap", "traits": { - "smithy.api#documentation": "

The business glossary terms attached to the asset.

" + "smithy.api#documentation": "

The metadata forms attached to the asset type.

", + "smithy.api#required": {} } }, "owningProjectId": { "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The ID of the project that owns the asset.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone project that owns the asset type.

", "smithy.api#required": {} } }, - "domainId": { + "originDomainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain to which the asset belongs.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset type was originally created.

" } }, - "listing": { - "target": "com.amazonaws.datazone#AssetListingDetails", + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The listing of the asset.

" + "smithy.api#documentation": "

The ID of the Amazon DataZone project in which the asset type was originally created.

" } }, - "formsOutput": { - "target": "com.amazonaws.datazone#FormOutputList", + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "

The metadata forms attached to the asset.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The timestamp of when the asset type was created.

" } }, - "readOnlyFormsOutput": { - "target": "com.amazonaws.datazone#FormOutputList", + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "

The read-only metadata forms attached to the asset.

" + "smithy.api#documentation": "

The Amazon DataZone user who created the asset type.

" } }, - "latestTimeSeriesDataPointFormsOutput": { - "target": "com.amazonaws.datazone#TimeSeriesDataPointSummaryFormOutputList", + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", "traits": { - "smithy.api#documentation": "

The latest data point that was imported into the time series form for the asset.

" + "smithy.api#documentation": "

The timestamp of when the asset type was updated.

" + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user that updated the asset type.

" } } }, @@ -9722,13 +11281,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#GetAssetType": { + "com.amazonaws.datazone#GetDataProduct": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#GetAssetTypeInput" + "target": "com.amazonaws.datazone#GetDataProductInput" }, "output": { - "target": "com.amazonaws.datazone#GetAssetTypeOutput" + "target": "com.amazonaws.datazone#GetDataProductOutput" }, "errors": [ { @@ -9748,30 +11307,30 @@ } ], "traits": { - "smithy.api#documentation": "

Gets an Amazon DataZone asset type.

", + "smithy.api#documentation": "

Gets the data product.

", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/v2/domains/{domainIdentifier}/asset-types/{identifier}" + "uri": "/v2/domains/{domainIdentifier}/data-products/{identifier}" }, "smithy.api#readonly": {} } }, - "com.amazonaws.datazone#GetAssetTypeInput": { + "com.amazonaws.datazone#GetDataProductInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset type exists.

", + "smithy.api#documentation": "

The ID of the domain where the data product lives.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, "identifier": { - "target": "com.amazonaws.datazone#AssetTypeIdentifier", + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "

The ID of the asset type.

", + "smithy.api#documentation": "

The ID of the data product.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -9779,7 +11338,7 @@ "revision": { "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "

The revision of the asset type.

", + "smithy.api#documentation": "

The revision of the data product.

", "smithy.api#httpQuery": "revision" } } @@ -9788,84 +11347,98 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#GetAssetTypeOutput": { + "com.amazonaws.datazone#GetDataProductOutput": { "type": "structure", "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset type exists.

", + "smithy.api#documentation": "

The ID of the domain where the data product lives.

", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#TypeName", + "id": { + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "

The name of the asset type.

", + "smithy.api#documentation": "

The ID of the data product.

", "smithy.api#required": {} } }, "revision": { "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "

The revision of the asset type.

", + "smithy.api#documentation": "

The revision of the data product.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#Description", + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "

The description of the asset type.

" + "smithy.api#documentation": "

The ID of the owning project of the data product.

", + "smithy.api#required": {} } }, - "formsOutput": { - "target": "com.amazonaws.datazone#FormsOutputMap", + "name": { + "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "

The metadata forms attached to the asset type.

", + "smithy.api#documentation": "

The name of the data product.

", "smithy.api#required": {} } }, - "owningProjectId": { - "target": "com.amazonaws.datazone#ProjectId", + "status": { + "target": "com.amazonaws.datazone#DataProductStatus", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone project that owns the asset type.

", + "smithy.api#default": "CREATED", + "smithy.api#documentation": "

The status of the data product.

", "smithy.api#required": {} } }, - "originDomainId": { - "target": "com.amazonaws.datazone#DomainId", + "description": { + "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset type was originally created.

" + "smithy.api#documentation": "

The description of the data product.

" } }, - "originProjectId": { - "target": "com.amazonaws.datazone#ProjectId", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "

The ID of the Amazon DataZone project in which the asset type was originally created.

" + "smithy.api#documentation": "

The glossary terms of the data product.

" + } + }, + "items": { + "target": "com.amazonaws.datazone#DataProductItems", + "traits": { + "smithy.api#documentation": "

The data assets of the data product.

" + } + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The metadata forms of the data product.

" } }, "createdAt": { "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "

The timestamp of when the asset type was created.

" + "smithy.api#documentation": "

The timestamp at which the data product is created.

" } }, "createdBy": { "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who created the asset type.

" + "smithy.api#documentation": "

The user who created the data product.

" } }, - "updatedAt": { - "target": "com.amazonaws.datazone#UpdatedAt", + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "

The timestamp of when the asset type was updated.

" + "smithy.api#documentation": "

The timestamp at which the first revision of the data product is created.

" } }, - "updatedBy": { - "target": "com.amazonaws.datazone#UpdatedBy", + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "

The Amazon DataZone user that updated the asset type.

" + "smithy.api#documentation": "

The user who created the first revision of the data product.

" } } }, @@ -10683,27 +12256,167 @@ "smithy.api#documentation": "

The timestamp of when this blueprint was upated.

", "smithy.api#timestampFormat": "date-time" } + }, + "provisioningConfigurations": { + "target": "com.amazonaws.datazone#ProvisioningConfigurationList", + "traits": { + "smithy.api#documentation": "

The provisioning configuration of a blueprint.

", + "smithy.api#notProperty": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentBlueprintInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the domain in which this blueprint exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of this Amazon DataZone blueprint.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentBlueprintOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of this Amazon DataZone blueprint.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintName", + "traits": { + "smithy.api#documentation": "

The name of this Amazon DataZone blueprint.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of this Amazon DataZone blueprint.

" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of this Amazon DataZone blueprint.

", + "smithy.api#required": {} + } + }, + "provisioningProperties": { + "target": "com.amazonaws.datazone#ProvisioningProperties", + "traits": { + "smithy.api#documentation": "

The provisioning properties of this Amazon DataZone blueprint.

", + "smithy.api#required": {} + } + }, + "deploymentProperties": { + "target": "com.amazonaws.datazone#DeploymentProperties", + "traits": { + "smithy.api#documentation": "

The deployment properties of this Amazon DataZone blueprint.

" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", + "traits": { + "smithy.api#documentation": "

The user parameters of this blueprint.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms attached to this Amazon DataZone blueprint.

" + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

A timestamp of when this blueprint was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this blueprint was updated.

", + "smithy.api#timestampFormat": "date-time" + } } }, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.datazone#GetEnvironmentBlueprintInput": { + "com.amazonaws.datazone#GetEnvironmentCredentials": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetEnvironmentCredentialsInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetEnvironmentCredentialsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets the credentials of an environment in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/credentials" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentCredentialsInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "

The identifier of the domain in which this blueprint exists.

", + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this environment and its credentials\n exist.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "identifier": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "

The ID of this Amazon DataZone blueprint.

", + "smithy.api#documentation": "

The ID of the environment whose credentials this operation gets.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -10713,78 +12426,38 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#GetEnvironmentBlueprintOutput": { + "com.amazonaws.datazone#GetEnvironmentCredentialsOutput": { "type": "structure", "members": { - "id": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintId", - "traits": { - "smithy.api#documentation": "

The ID of this Amazon DataZone blueprint.

", - "smithy.api#required": {} - } - }, - "name": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintName", - "traits": { - "smithy.api#documentation": "

The name of this Amazon DataZone blueprint.

", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "

The description of this Amazon DataZone blueprint.

" - } - }, - "provider": { + "accessKeyId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The provider of this Amazon DataZone blueprint.

", - "smithy.api#required": {} - } - }, - "provisioningProperties": { - "target": "com.amazonaws.datazone#ProvisioningProperties", - "traits": { - "smithy.api#documentation": "

The provisioning properties of this Amazon DataZone blueprint.

", - "smithy.api#required": {} - } - }, - "deploymentProperties": { - "target": "com.amazonaws.datazone#DeploymentProperties", - "traits": { - "smithy.api#documentation": "

The deployment properties of this Amazon DataZone blueprint.

" - } - }, - "userParameters": { - "target": "com.amazonaws.datazone#CustomParameterList", - "traits": { - "smithy.api#documentation": "

The user parameters of this blueprint.

" + "smithy.api#documentation": "

The access key ID of the environment.

" } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "secretAccessKey": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The glossary terms attached to this Amazon DataZone blueprint.

" + "smithy.api#documentation": "

The secret access key of the environment credentials.

" } }, - "createdAt": { - "target": "smithy.api#Timestamp", + "sessionToken": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

A timestamp of when this blueprint was created.

", - "smithy.api#timestampFormat": "date-time" + "smithy.api#documentation": "

The session token of the environment credentials.

" } }, - "updatedAt": { + "expiration": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The timestamp of when this blueprint was updated.

", + "smithy.api#documentation": "

The expiration timestamp of the environment credentials.

", "smithy.api#timestampFormat": "date-time" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#output": {}, + "smithy.api#sensitive": {} } }, "com.amazonaws.datazone#GetEnvironmentInput": { @@ -11884,7 +13557,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets a listing (a record of an asset at a given time).

", + "smithy.api#documentation": "

Gets a listing (a record of an asset at a given time). If you specify a listing version,\n only details that are specific to that version are returned.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -12421,6 +14094,9 @@ "subscriptionId": { "target": "com.amazonaws.datazone#SubscriptionId", "traits": { + "smithy.api#deprecated": { + "message": "Multiple subscriptions can exist for a single grant" + }, "smithy.api#documentation": "

The identifier of the subscription.

" } } @@ -13579,6 +15255,50 @@ "smithy.api#documentation": "

The details of a listing for which a subscription is to be granted.

" } }, + "com.amazonaws.datazone#GreaterThanExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the column.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value that might be greater than an expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies whether the value is greater than an expression.

" + } + }, + "com.amazonaws.datazone#GreaterThanOrEqualToExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the column.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value that might be greater than or equal to an expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies whether the value is greater than or equal to an expression.

" + } + }, "com.amazonaws.datazone#GroupDetails": { "type": "structure", "members": { @@ -13710,97 +15430,253 @@ } }, "traits": { - "smithy.api#documentation": "

The details of an IAM user profile in Amazon DataZone.

" + "smithy.api#documentation": "

The details of an IAM user profile in Amazon DataZone.

" + } + }, + "com.amazonaws.datazone#Import": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.datazone#FormTypeName", + "traits": { + "smithy.api#documentation": "

The name of the import.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the import.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the import of the metadata form type.

" + } + }, + "com.amazonaws.datazone#ImportList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#Import" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.datazone#InExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the column.

", + "smithy.api#required": {} + } + }, + "values": { + "target": "com.amazonaws.datazone#StringList", + "traits": { + "smithy.api#documentation": "

The values that might be in the expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies whether values are in the expression.

" + } + }, + "com.amazonaws.datazone#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.datazone#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request has failed because of an unknown error, exception or failure.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.datazone#InventorySearchScope": { + "type": "enum", + "members": { + "ASSET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSET" + } + }, + "GLOSSARY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GLOSSARY" + } + }, + "GLOSSARY_TERM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GLOSSARY_TERM" + } + }, + "DATA_PRODUCT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATA_PRODUCT" + } + } + } + }, + "com.amazonaws.datazone#IsNotNullExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the column.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies that the expression is not null.

" + } + }, + "com.amazonaws.datazone#IsNullExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the column.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies that the expression is null.

" + } + }, + "com.amazonaws.datazone#ItemGlossaryTerms": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#GlossaryTermId" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2 + } + } + }, + "com.amazonaws.datazone#KmsKeyArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$" + } + }, + "com.amazonaws.datazone#LakeFormationConfiguration": { + "type": "structure", + "members": { + "locationRegistrationRole": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The role that is used to manage read/write access to the chosen Amazon S3 bucket(s) for\n Data Lake using AWS Lake Formation hybrid access mode.

" + } + }, + "locationRegistrationExcludeS3Locations": { + "target": "com.amazonaws.datazone#S3LocationList", + "traits": { + "smithy.api#documentation": "

Specifies certain Amazon S3 locations if you do not want Amazon DataZone to\n automatically register them in hybrid mode.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The Lake Formation configuration of the Data Lake blueprint.

" + } + }, + "com.amazonaws.datazone#LastName": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} } }, - "com.amazonaws.datazone#Import": { + "com.amazonaws.datazone#LessThanExpression": { "type": "structure", "members": { - "name": { - "target": "com.amazonaws.datazone#FormTypeName", + "columnName": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The name of the import.

", + "smithy.api#documentation": "

The name of the column.

", "smithy.api#required": {} } }, - "revision": { - "target": "com.amazonaws.datazone#Revision", + "value": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The revision of the import.

", + "smithy.api#documentation": "

The value that might be less than the expression.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The details of the import of the metadata form type.

" - } - }, - "com.amazonaws.datazone#ImportList": { - "type": "list", - "member": { - "target": "com.amazonaws.datazone#Import" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 10 - } + "smithy.api#documentation": "

Specifies that a value is less than an expression.

" } }, - "com.amazonaws.datazone#InternalServerException": { + "com.amazonaws.datazone#LessThanOrEqualToExpression": { "type": "structure", "members": { - "message": { - "target": "com.amazonaws.datazone#ErrorMessage", + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the column.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", "traits": { + "smithy.api#documentation": "

The value that might be less than or equal to an expression.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The request has failed because of an unknown error, exception or failure.

", - "smithy.api#error": "server", - "smithy.api#httpError": 500, - "smithy.api#retryable": {} + "smithy.api#documentation": "

Specifies that a value is less than or equal to an expression.

" } }, - "com.amazonaws.datazone#InventorySearchScope": { - "type": "enum", + "com.amazonaws.datazone#LikeExpression": { + "type": "structure", "members": { - "ASSET": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ASSET" - } - }, - "GLOSSARY": { - "target": "smithy.api#Unit", + "columnName": { + "target": "smithy.api#String", "traits": { - "smithy.api#enumValue": "GLOSSARY" + "smithy.api#documentation": "

The name of the column.

", + "smithy.api#required": {} } }, - "GLOSSARY_TERM": { - "target": "smithy.api#Unit", + "value": { + "target": "smithy.api#String", "traits": { - "smithy.api#enumValue": "GLOSSARY_TERM" + "smithy.api#documentation": "

The value that might be like the expression.

", + "smithy.api#required": {} } } - } - }, - "com.amazonaws.datazone#KmsKeyArn": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 1024 - }, - "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$" - } - }, - "com.amazonaws.datazone#LastName": { - "type": "string", + }, "traits": { - "smithy.api#sensitive": {} + "smithy.api#documentation": "

Specifies that a value is like the expression.

" } }, "com.amazonaws.datazone#LineageEvent": { @@ -14012,6 +15888,113 @@ "smithy.api#documentation": "

The details of a data lineage node type.

" } }, + "com.amazonaws.datazone#ListAssetFilters": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListAssetFiltersInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListAssetFiltersOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists asset filters.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListAssetFiltersInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain where you want to list asset filters.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "assetIdentifier": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The ID of the data asset.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#FilterStatus", + "traits": { + "smithy.api#documentation": "

The status of the asset filter.

", + "smithy.api#httpQuery": "status" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of asset filters is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of asset filters, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListAssetFilters to\n list the next set of asset filters.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of asset filters to return in a single call to\n ListAssetFilters. When the number of asset filters to be listed is greater\n than the value of MaxResults, the response contains a NextToken\n value that you can use in a subsequent call to ListAssetFilters to list the\n next set of asset filters.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListAssetFiltersOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#AssetFilters", + "traits": { + "smithy.api#documentation": "

The results of the ListAssetFilters action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of asset filters is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of asset filters, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListAssetFilters to\n list the next set of asset filters.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#ListAssetRevisions": { "type": "operation", "input": { @@ -14077,8 +16060,107 @@ "maxResults": { "target": "com.amazonaws.datazone#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of revisions to return in a single call to\n ListAssetRevisions. When the number of revisions to be listed is greater\n than the value of MaxResults, the response contains a NextToken\n value that you can use in a subsequent call to ListAssetRevisions to list the\n next set of revisions.

", - "smithy.api#httpQuery": "maxResults" + "smithy.api#documentation": "

The maximum number of revisions to return in a single call to\n ListAssetRevisions. When the number of revisions to be listed is greater\n than the value of MaxResults, the response contains a NextToken\n value that you can use in a subsequent call to ListAssetRevisions to list the\n next set of revisions.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListAssetRevisionsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#AssetRevisions", + "traits": { + "smithy.api#documentation": "

The results of the ListAssetRevisions action.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of revisions is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of revisions, the response includes\n a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListAssetRevisions to\n list the next set of revisions.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListDataProductRevisions": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListDataProductRevisionsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListDataProductRevisionsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists data product revisions.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/data-products/{identifier}/revisions" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListDataProductRevisionsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain of the data product revisions that you want to list.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#DataProductId", + "traits": { + "smithy.api#documentation": "

The ID of the data product revision.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of asset filters to return in a single call to\n ListDataProductRevisions. When the number of data product revisions to be\n listed is greater than the value of MaxResults, the response contains a\n NextToken value that you can use in a subsequent call to\n ListDataProductRevisions to list the next set of data product\n revisions.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of data product revisions is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of data product revisions, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListDataProductRevisions to list the next set of data product\n revisions.

", + "smithy.api#httpQuery": "nextToken" } } }, @@ -14086,19 +16168,20 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#ListAssetRevisionsOutput": { + "com.amazonaws.datazone#ListDataProductRevisionsOutput": { "type": "structure", "members": { "items": { - "target": "com.amazonaws.datazone#AssetRevisions", + "target": "com.amazonaws.datazone#DataProductRevisions", "traits": { - "smithy.api#documentation": "

The results of the ListAssetRevisions action.

" + "smithy.api#documentation": "

The results of the ListDataProductRevisions action.

", + "smithy.api#required": {} } }, "nextToken": { "target": "com.amazonaws.datazone#PaginationToken", "traits": { - "smithy.api#documentation": "

When the number of revisions is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of revisions, the response includes\n a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListAssetRevisions to\n list the next set of revisions.

" + "smithy.api#documentation": "

When the number of data product revisions is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of data product revisions, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListDataProductRevisions to list the next set of data product\n revisions.

" } } }, @@ -15791,6 +17874,13 @@ "smithy.api#httpQuery": "subscriptionId" } }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the owning project of the subscription grants.

", + "smithy.api#httpQuery": "owningProjectId" + } + }, "sortBy": { "target": "com.amazonaws.datazone#SortKey", "traits": { @@ -15900,7 +17990,7 @@ "status": { "target": "com.amazonaws.datazone#SubscriptionRequestStatus", "traits": { - "smithy.api#documentation": "

Specifies the status of the subscription requests.

", + "smithy.api#documentation": "

Specifies the status of the subscription requests.

\n \n

This is not a required parameter, but if not specified, by default, Amazon DataZone\n returns only PENDING subscription requests.

\n
", "smithy.api#httpQuery": "status" } }, @@ -16155,7 +18245,7 @@ "status": { "target": "com.amazonaws.datazone#SubscriptionStatus", "traits": { - "smithy.api#documentation": "

The status of the subscriptions that you want to list.

", + "smithy.api#documentation": "

The status of the subscriptions that you want to list.

\n \n

This is not a required parameter, but if not provided, by default, Amazon DataZone\n returns only APPROVED subscriptions.

\n
", "smithy.api#httpQuery": "status" } }, @@ -16487,6 +18577,12 @@ "traits": { "smithy.api#documentation": "

An asset published in an Amazon DataZone catalog.

" } + }, + "dataProductListing": { + "target": "com.amazonaws.datazone#DataProductListing", + "traits": { + "smithy.api#documentation": "

The data product listing.

" + } } }, "traits": { @@ -16569,6 +18665,70 @@ } } }, + "com.amazonaws.datazone#ListingSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#ListingSummary" + } + }, + "com.amazonaws.datazone#ListingSummary": { + "type": "structure", + "members": { + "listingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

The ID of the data product listing.

" + } + }, + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the data product listing.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms of the data product.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The summary of the listing of the data product.

" + } + }, + "com.amazonaws.datazone#ListingSummaryItem": { + "type": "structure", + "members": { + "listingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

The ID of the data product listing.

" + } + }, + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the data product listing.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms of the data product listing.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The results of the data product summary.

" + } + }, + "com.amazonaws.datazone#ListingSummaryItems": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#ListingSummaryItem" + } + }, "com.amazonaws.datazone#LongDescription": { "type": "string", "traits": { @@ -16887,6 +19047,72 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.datazone#NotEqualToExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the column.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value that might not be equal to the expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies that a value is not equal to the expression.

" + } + }, + "com.amazonaws.datazone#NotInExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the column.

", + "smithy.api#required": {} + } + }, + "values": { + "target": "com.amazonaws.datazone#StringList", + "traits": { + "smithy.api#documentation": "

The value that might not be in the expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies that a value is not in the expression.

" + } + }, + "com.amazonaws.datazone#NotLikeExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the column.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value that might not be like the expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies that a value might be not like the expression.

" + } + }, "com.amazonaws.datazone#NotificationOutput": { "type": "structure", "members": { @@ -17463,6 +19689,26 @@ "smithy.api#documentation": "

The details of a Amazon DataZone project.

" } }, + "com.amazonaws.datazone#ProvisioningConfiguration": { + "type": "union", + "members": { + "lakeFormationConfiguration": { + "target": "com.amazonaws.datazone#LakeFormationConfiguration", + "traits": { + "smithy.api#documentation": "

The Lake Formation configuration of the Data Lake blueprint.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The provisioning configuration of the blueprint.

" + } + }, + "com.amazonaws.datazone#ProvisioningConfigurationList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#ProvisioningConfiguration" + } + }, "com.amazonaws.datazone#ProvisioningProperties": { "type": "union", "members": { @@ -17558,6 +19804,13 @@ "traits": { "smithy.api#documentation": "

The regional parameters in the environment blueprint.

" } + }, + "provisioningConfigurations": { + "target": "com.amazonaws.datazone#ProvisioningConfigurationList", + "traits": { + "smithy.api#documentation": "

The provisioning configuration of a blueprint.

", + "smithy.api#notProperty": {} + } } }, "traits": { @@ -17618,6 +19871,13 @@ "smithy.api#documentation": "

The timestamp of when the environment blueprint was updated.

", "smithy.api#timestampFormat": "date-time" } + }, + "provisioningConfigurations": { + "target": "com.amazonaws.datazone#ProvisioningConfigurationList", + "traits": { + "smithy.api#documentation": "

The provisioning configuration of a blueprint.

", + "smithy.api#notProperty": {} + } } }, "traits": { @@ -18384,89 +20644,223 @@ "com.amazonaws.datazone#RevokeSubscriptionOutput": { "type": "structure", "members": { - "id": { - "target": "com.amazonaws.datazone#SubscriptionId", + "id": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The identifier of the revoked subscription.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The identifier of the user who revoked the subscription.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who revoked the subscription.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain where you want to revoke a subscription.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionStatus", + "traits": { + "smithy.api#documentation": "

The status of the revoked subscription.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription was revoked.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription was revoked.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipal": { + "target": "com.amazonaws.datazone#SubscribedPrincipal", + "traits": { + "smithy.api#documentation": "

The subscribed principal of the revoked subscription.

", + "smithy.api#required": {} + } + }, + "subscribedListing": { + "target": "com.amazonaws.datazone#SubscribedListing", + "traits": { + "smithy.api#documentation": "

The subscribed listing of the revoked subscription.

", + "smithy.api#required": {} + } + }, + "subscriptionRequestId": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request for the revoked subscription.

" + } + }, + "retainPermissions": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether permissions are retained when the subscription is revoked.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#RoleArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$" + } + }, + "com.amazonaws.datazone#RowFilter": { + "type": "union", + "members": { + "expression": { + "target": "com.amazonaws.datazone#RowFilterExpression", + "traits": { + "smithy.api#documentation": "

The expression of the row filter.

" + } + }, + "and": { + "target": "com.amazonaws.datazone#RowFilterList", + "traits": { + "smithy.api#documentation": "

The 'and' clause of the row filter.

" + } + }, + "or": { + "target": "com.amazonaws.datazone#RowFilterList", + "traits": { + "smithy.api#documentation": "

The 'or' clause of the row filter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The row filter.

" + } + }, + "com.amazonaws.datazone#RowFilterConfiguration": { + "type": "structure", + "members": { + "rowFilter": { + "target": "com.amazonaws.datazone#RowFilter", + "traits": { + "smithy.api#documentation": "

The row filter.

", + "smithy.api#required": {} + } + }, + "sensitive": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#default": true, + "smithy.api#documentation": "

Specifies whether the row filter is sensitive.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The row filter configuration details.

" + } + }, + "com.amazonaws.datazone#RowFilterExpression": { + "type": "union", + "members": { + "equalTo": { + "target": "com.amazonaws.datazone#EqualToExpression", "traits": { - "smithy.api#documentation": "

The identifier of the revoked subscription.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The 'equal to' clause of the row filter expression.

" } }, - "createdBy": { - "target": "com.amazonaws.datazone#CreatedBy", + "notEqualTo": { + "target": "com.amazonaws.datazone#NotEqualToExpression", "traits": { - "smithy.api#documentation": "

The identifier of the user who revoked the subscription.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The 'not equal to' clause of the row filter expression.

" } }, - "updatedBy": { - "target": "com.amazonaws.datazone#UpdatedBy", + "greaterThan": { + "target": "com.amazonaws.datazone#GreaterThanExpression", "traits": { - "smithy.api#documentation": "

The Amazon DataZone user who revoked the subscription.

" + "smithy.api#documentation": "

The 'greater than' clause of the row filter expression.

" } }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "lessThan": { + "target": "com.amazonaws.datazone#LessThanExpression", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon DataZone domain where you want to revoke a subscription.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The 'less than' clause of the row filter expression.

" } }, - "status": { - "target": "com.amazonaws.datazone#SubscriptionStatus", + "greaterThanOrEqualTo": { + "target": "com.amazonaws.datazone#GreaterThanOrEqualToExpression", "traits": { - "smithy.api#documentation": "

The status of the revoked subscription.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The 'greater than or equal to' clause of the filter expression.

" } }, - "createdAt": { - "target": "com.amazonaws.datazone#CreatedAt", + "lessThanOrEqualTo": { + "target": "com.amazonaws.datazone#LessThanOrEqualToExpression", "traits": { - "smithy.api#documentation": "

The timestamp of when the subscription was revoked.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The 'less than or equal to' clause of the row filter expression.

" } }, - "updatedAt": { - "target": "com.amazonaws.datazone#UpdatedAt", + "isNull": { + "target": "com.amazonaws.datazone#IsNullExpression", "traits": { - "smithy.api#documentation": "

The timestamp of when the subscription was revoked.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The 'is null' clause of the row filter expression.

" } }, - "subscribedPrincipal": { - "target": "com.amazonaws.datazone#SubscribedPrincipal", + "isNotNull": { + "target": "com.amazonaws.datazone#IsNotNullExpression", "traits": { - "smithy.api#documentation": "

The subscribed principal of the revoked subscription.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The 'is not null' clause of the row filter expression.

" } }, - "subscribedListing": { - "target": "com.amazonaws.datazone#SubscribedListing", + "in": { + "target": "com.amazonaws.datazone#InExpression", "traits": { - "smithy.api#documentation": "

The subscribed listing of the revoked subscription.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The 'in' clause of the row filter expression.

" } }, - "subscriptionRequestId": { - "target": "com.amazonaws.datazone#SubscriptionRequestId", + "notIn": { + "target": "com.amazonaws.datazone#NotInExpression", "traits": { - "smithy.api#documentation": "

The identifier of the subscription request for the revoked subscription.

" + "smithy.api#documentation": "

The 'not in' clause of the row filter expression.

" } }, - "retainPermissions": { - "target": "smithy.api#Boolean", + "like": { + "target": "com.amazonaws.datazone#LikeExpression", "traits": { - "smithy.api#documentation": "

Specifies whether permissions are retained when the subscription is revoked.

" + "smithy.api#documentation": "

The 'like' clause of the row filter expression.

" + } + }, + "notLike": { + "target": "com.amazonaws.datazone#NotLikeExpression", + "traits": { + "smithy.api#documentation": "

The 'not like' clause of the row filter expression.

" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

The row filter expression.

" } }, - "com.amazonaws.datazone#RoleArn": { - "type": "string", - "traits": { - "smithy.api#pattern": "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$" + "com.amazonaws.datazone#RowFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#RowFilter" } }, "com.amazonaws.datazone#RunStatisticsForAssets": { @@ -18507,6 +20901,28 @@ "smithy.api#documentation": "

The asset statistics from the data source run.

" } }, + "com.amazonaws.datazone#S3Location": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^s3://.+$" + } + }, + "com.amazonaws.datazone#S3LocationList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#S3Location" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, "com.amazonaws.datazone#ScheduleConfiguration": { "type": "structure", "members": { @@ -18786,12 +21202,9 @@ } }, "dataProductItem": { - "target": "com.amazonaws.datazone#DataProductSummary", + "target": "com.amazonaws.datazone#DataProductResultItem", "traits": { - "smithy.api#deprecated": { - "message": "This field is deprecated." - }, - "smithy.api#documentation": "

The data product item included in the search results.

" + "smithy.api#documentation": "

The data product.

" } } }, @@ -18986,6 +21399,12 @@ "traits": { "smithy.api#documentation": "

The asset listing included in the results of the SearchListings\n action.

" } + }, + "dataProductListing": { + "target": "com.amazonaws.datazone#DataProductListingItem", + "traits": { + "smithy.api#documentation": "

The data product listing.

" + } } }, "traits": { @@ -19833,6 +22252,12 @@ "smithy.api#output": {} } }, + "com.amazonaws.datazone#StringList": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, "com.amazonaws.datazone#SubscribedAsset": { "type": "structure", "members": { @@ -20020,6 +22445,12 @@ "traits": { "smithy.api#documentation": "

The asset for which the subscription grant is created.

" } + }, + "productListing": { + "target": "com.amazonaws.datazone#SubscribedProductListing", + "traits": { + "smithy.api#documentation": "

The data product listing.

" + } } }, "traits": { @@ -20078,6 +22509,50 @@ "target": "com.amazonaws.datazone#SubscribedPrincipal" } }, + "com.amazonaws.datazone#SubscribedProductListing": { + "type": "structure", + "members": { + "entityId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The ID of the data product listing.

" + } + }, + "entityRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the data product listing.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms of the data product listing.

" + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the data product listing.

" + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the data product listing.

" + } + }, + "assetListings": { + "target": "com.amazonaws.datazone#AssetInDataProductListingItems", + "traits": { + "smithy.api#documentation": "

The data assets of the data product listing.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The data product listing.

" + } + }, "com.amazonaws.datazone#SubscribedProject": { "type": "structure", "members": { @@ -20292,7 +22767,10 @@ "subscriptionId": { "target": "com.amazonaws.datazone#SubscriptionId", "traits": { - "smithy.api#documentation": "

The ID of the subscription grant.

" + "smithy.api#deprecated": { + "message": "Multiple subscriptions can exist for a single grant" + }, + "smithy.api#documentation": "

The ID of the subscription.

" } } }, @@ -21600,6 +24078,173 @@ "smithy.api#output": {} } }, + "com.amazonaws.datazone#UpdateAssetFilter": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateAssetFilterInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateAssetFilterOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates an asset filter.

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#UpdateAssetFilterInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain where you want to update an asset filter.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "assetIdentifier": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The ID of the data asset.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "

The ID of the asset filter.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the asset filter.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the asset filter.

" + } + }, + "configuration": { + "target": "com.amazonaws.datazone#AssetFilterConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the asset filter.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateAssetFilterOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "

The ID of the asset filter.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain where the asset filter was created.

", + "smithy.api#required": {} + } + }, + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The ID of the data asset.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FilterName", + "traits": { + "smithy.api#documentation": "

The name of the asset filter.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the asset filter.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#FilterStatus", + "traits": { + "smithy.api#documentation": "

The status of the asset filter.

" + } + }, + "configuration": { + "target": "com.amazonaws.datazone#AssetFilterConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the asset filter.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp at which the asset filter was created.

" + } + }, + "errorMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The error message that is displayed if the action is not completed successfully.

" + } + }, + "effectiveColumnNames": { + "target": "com.amazonaws.datazone#ColumnNameList", + "traits": { + "smithy.api#documentation": "

The column names of the asset filter.

" + } + }, + "effectiveRowFilter": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The row filter of the asset filter.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#UpdateDataSource": { "type": "operation", "input": { @@ -23254,6 +25899,9 @@ "subscriptionId": { "target": "com.amazonaws.datazone#SubscriptionId", "traits": { + "smithy.api#deprecated": { + "message": "Multiple subscriptions can exist for a single grant" + }, "smithy.api#documentation": "

The identifier of the subscription.

" } } diff --git a/models/docdb.json b/models/docdb.json index 5dd7131697..bad56a44ce 100644 --- a/models/docdb.json +++ b/models/docdb.json @@ -245,6 +245,9 @@ { "target": "com.amazonaws.docdb#FailoverDBCluster" }, + { + "target": "com.amazonaws.docdb#FailoverGlobalCluster" + }, { "target": "com.amazonaws.docdb#ListTagsForResource" }, @@ -2812,6 +2815,16 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.docdb#DBClusterIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[A-Za-z][0-9A-Za-z-:._]*$" + } + }, "com.amazonaws.docdb#DBClusterList": { "type": "list", "member": { @@ -5765,6 +5778,79 @@ "smithy.api#output": {} } }, + "com.amazonaws.docdb#FailoverGlobalCluster": { + "type": "operation", + "input": { + "target": "com.amazonaws.docdb#FailoverGlobalClusterMessage" + }, + "output": { + "target": "com.amazonaws.docdb#FailoverGlobalClusterResult" + }, + "errors": [ + { + "target": "com.amazonaws.docdb#DBClusterNotFoundFault" + }, + { + "target": "com.amazonaws.docdb#GlobalClusterNotFoundFault" + }, + { + "target": "com.amazonaws.docdb#InvalidDBClusterStateFault" + }, + { + "target": "com.amazonaws.docdb#InvalidGlobalClusterStateFault" + } + ], + "traits": { + "smithy.api#documentation": "

Promotes the specified secondary DB cluster to be the primary DB cluster in the global cluster when failing over a global cluster occurs.

\n

Use this operation to respond to an unplanned event, such as a regional disaster in the primary region. \n Failing over can result in a loss of write transaction data that wasn't replicated to the chosen secondary before the failover event occurred. \n However, the recovery process that promotes a DB instance on the chosen secondary DB cluster to be the primary writer DB instance guarantees that the data is in a transactionally consistent state.

" + } + }, + "com.amazonaws.docdb#FailoverGlobalClusterMessage": { + "type": "structure", + "members": { + "GlobalClusterIdentifier": { + "target": "com.amazonaws.docdb#GlobalClusterIdentifier", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The identifier of the Amazon DocumentDB global cluster to apply this operation. \n The identifier is the unique key assigned by the user when the cluster is created. \n In other words, it's the name of the global cluster.

\n

Constraints:

\n
    \n
  • \n

    Must match the identifier of an existing global cluster.

    \n
  • \n
  • \n

    Minimum length of 1. Maximum length of 255.

    \n
  • \n
\n

Pattern: [A-Za-z][0-9A-Za-z-:._]*\n

", + "smithy.api#required": {} + } + }, + "TargetDbClusterIdentifier": { + "target": "com.amazonaws.docdb#DBClusterIdentifier", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The identifier of the secondary Amazon DocumentDB cluster that you want to promote to the primary for the global cluster. \n Use the Amazon Resource Name (ARN) for the identifier so that Amazon DocumentDB can locate the cluster in its Amazon Web Services region.

\n

Constraints:

\n
    \n
  • \n

    Must match the identifier of an existing secondary cluster.

    \n
  • \n
  • \n

    Minimum length of 1. Maximum length of 255.

    \n
  • \n
\n

Pattern: [A-Za-z][0-9A-Za-z-:._]*\n

", + "smithy.api#required": {} + } + }, + "AllowDataLoss": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to allow data loss for this global cluster operation. Allowing data loss triggers a global failover operation.

\n

If you don't specify AllowDataLoss, the global cluster operation defaults to a switchover.

\n

Constraints:

\n
    \n
  • \n

    Can't be specified together with the Switchover parameter.

    \n
  • \n
" + } + }, + "Switchover": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to switch over this global database cluster.

\n

Constraints:

\n
    \n
  • \n

    Can't be specified together with the AllowDataLoss parameter.

    \n
  • \n
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.docdb#FailoverGlobalClusterResult": { + "type": "structure", + "members": { + "GlobalCluster": { + "target": "com.amazonaws.docdb#GlobalCluster" + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.docdb#Filter": { "type": "structure", "members": { @@ -8443,7 +8529,7 @@ } }, "TargetDbClusterIdentifier": { - "target": "com.amazonaws.docdb#String", + "target": "com.amazonaws.docdb#DBClusterIdentifier", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "

The identifier of the secondary Amazon DocumentDB cluster to promote to the new primary for the global database cluster. \n Use the Amazon Resource Name (ARN) for the identifier so that Amazon DocumentDB can locate the cluster in its Amazon Web Services region.

\n

Constraints:

\n
    \n
  • \n

    Must match the identifier of an existing secondary cluster.

    \n
  • \n
  • \n

    Minimum length of 1. Maximum length of 255.

    \n
  • \n
\n

Pattern: [A-Za-z][0-9A-Za-z-:._]*\n

", diff --git a/models/dynamodb.json b/models/dynamodb.json index 3dcf98b62d..de7cc45ce4 100644 --- a/models/dynamodb.json +++ b/models/dynamodb.json @@ -780,7 +780,7 @@ } ], "traits": { - "smithy.api#documentation": "

This operation allows you to perform batch reads or writes on data stored in DynamoDB,\n using PartiQL. Each read statement in a BatchExecuteStatement must specify\n an equality condition on all key attributes. This enforces that each SELECT\n statement in a batch returns at most a single item.

\n \n

The entire batch must consist of either read statements or write statements, you\n cannot mix both in one batch.

\n
\n \n

A HTTP 200 response does not mean that all statements in the BatchExecuteStatement\n succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each\n statement.

\n
" + "smithy.api#documentation": "

This operation allows you to perform batch reads or writes on data stored in DynamoDB,\n using PartiQL. Each read statement in a BatchExecuteStatement must specify\n an equality condition on all key attributes. This enforces that each SELECT\n statement in a batch returns at most a single item. For more information, see Running batch operations with PartiQL for DynamoDB\n .

\n \n

The entire batch must consist of either read statements or write statements, you\n cannot mix both in one batch.

\n
\n \n

A HTTP 200 response does not mean that all statements in the BatchExecuteStatement\n succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each\n statement.

\n
" } }, "com.amazonaws.dynamodb#BatchExecuteStatementInput": { @@ -807,7 +807,7 @@ "Responses": { "target": "com.amazonaws.dynamodb#PartiQLBatchResponse", "traits": { - "smithy.api#documentation": "

The response to each PartiQL statement in the batch. The values of the list are \n ordered according to the ordering of the request statements.

" + "smithy.api#documentation": "

The response to each PartiQL statement in the batch. The values of the list are\n ordered according to the ordering of the request statements.

" } }, "ConsumedCapacity": { @@ -850,7 +850,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

The BatchGetItem operation returns the attributes of one or more items\n from one or more tables. You identify requested items by primary key.

\n

A single operation can retrieve up to 16 MB of data, which can contain as many as 100\n items. BatchGetItem returns a partial result if the response size limit is\n exceeded, the table's provisioned throughput is exceeded, more than 1MB per partition is requested,\n or an internal processing failure occurs. If a partial result is returned, the operation returns a value for\n UnprocessedKeys. You can use this value to retry the operation starting\n with the next item to get.

\n \n

If you request more than 100 items, BatchGetItem returns a\n ValidationException with the message \"Too many items requested for\n the BatchGetItem call.\"

\n
\n

For example, if you ask to retrieve 100 items, but each individual item is 300 KB in\n size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns\n an appropriate UnprocessedKeys value so you can get the next page of\n results. If desired, your application can include its own logic to assemble the pages of\n results into one dataset.

\n

If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchGetItem returns a\n ProvisionedThroughputExceededException. If at least\n one of the items is successfully processed, then\n BatchGetItem completes successfully, while returning the keys of the\n unread items in UnprocessedKeys.

\n \n

If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.

\n

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.

\n
\n

By default, BatchGetItem performs eventually consistent reads on every\n table in the request. If you want strongly consistent reads instead, you can set\n ConsistentRead to true for any or all tables.

\n

In order to minimize response latency, BatchGetItem may retrieve items in\n parallel.

\n

When designing your application, keep in mind that DynamoDB does not return items in\n any particular order. To help parse the response by item, include the primary key values\n for the items in your request in the ProjectionExpression parameter.

\n

If a requested item does not exist, it is not returned in the result. Requests for\n nonexistent items consume the minimum read capacity units according to the type of read.\n For more information, see Working with Tables in the Amazon DynamoDB Developer\n Guide.

", + "smithy.api#documentation": "

The BatchGetItem operation returns the attributes of one or more items\n from one or more tables. You identify requested items by primary key.

\n

A single operation can retrieve up to 16 MB of data, which can contain as many as 100\n items. BatchGetItem returns a partial result if the response size limit is\n exceeded, the table's provisioned throughput is exceeded, more than 1MB per partition is\n requested, or an internal processing failure occurs. If a partial result is returned,\n the operation returns a value for UnprocessedKeys. You can use this value\n to retry the operation starting with the next item to get.

\n \n

If you request more than 100 items, BatchGetItem returns a\n ValidationException with the message \"Too many items requested for\n the BatchGetItem call.\"

\n
\n

For example, if you ask to retrieve 100 items, but each individual item is 300 KB in\n size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns\n an appropriate UnprocessedKeys value so you can get the next page of\n results. If desired, your application can include its own logic to assemble the pages of\n results into one dataset.

\n

If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchGetItem returns a\n ProvisionedThroughputExceededException. If at least\n one of the items is successfully processed, then\n BatchGetItem completes successfully, while returning the keys of the\n unread items in UnprocessedKeys.

\n \n

If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.

\n

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.

\n
\n

By default, BatchGetItem performs eventually consistent reads on every\n table in the request. If you want strongly consistent reads instead, you can set\n ConsistentRead to true for any or all tables.

\n

In order to minimize response latency, BatchGetItem may retrieve items in\n parallel.

\n

When designing your application, keep in mind that DynamoDB does not return items in\n any particular order. To help parse the response by item, include the primary key values\n for the items in your request in the ProjectionExpression parameter.

\n

If a requested item does not exist, it is not returned in the result. Requests for\n nonexistent items consume the minimum read capacity units according to the type of read.\n For more information, see Working with Tables in the Amazon DynamoDB Developer\n Guide.

", "smithy.api#examples": [ { "title": "To retrieve multiple items from a table", @@ -919,7 +919,7 @@ "RequestItems": { "target": "com.amazonaws.dynamodb#BatchGetRequestMap", "traits": { - "smithy.api#documentation": "

A map of one or more table names or table ARNs and, for each table, a map that describes one or more\n items to retrieve from that table. Each table name or ARN can be used only once per\n BatchGetItem request.

\n

Each element in the map of items to retrieve consists of the following:

\n
    \n
  • \n

    \n ConsistentRead - If true, a strongly consistent read\n is used; if false (the default), an eventually consistent read is\n used.

    \n
  • \n
  • \n

    \n ExpressionAttributeNames - One or more substitution tokens for\n attribute names in the ProjectionExpression parameter. The\n following are some use cases for using\n ExpressionAttributeNames:

    \n
      \n
    • \n

      To access an attribute whose name conflicts with a DynamoDB reserved\n word.

      \n
    • \n
    • \n

      To create a placeholder for repeating occurrences of an attribute name\n in an expression.

      \n
    • \n
    • \n

      To prevent special characters in an attribute name from being\n misinterpreted in an expression.

      \n
    • \n
    \n

    Use the # character in an expression to\n dereference an attribute name. For example, consider the following attribute\n name:

    \n
      \n
    • \n

      \n Percentile\n

      \n
    • \n
    \n

    The name of this attribute conflicts with a reserved word, so it cannot be\n used directly in an expression. (For the complete list of reserved words, see\n Reserved\n Words in the Amazon DynamoDB Developer Guide).\n To work around this, you could specify the following for\n ExpressionAttributeNames:

    \n
      \n
    • \n

      \n {\"#P\":\"Percentile\"}\n

      \n
    • \n
    \n

    You could then use this substitution in an expression, as in this\n example:

    \n
      \n
    • \n

      \n #P = :val\n

      \n
    • \n
    \n \n

    Tokens that begin with the : character\n are expression attribute values, which are placeholders\n for the actual value at runtime.

    \n
    \n

    For more information about expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB\n Developer Guide.

    \n
  • \n
  • \n

    \n Keys - An array of primary key attribute values that define\n specific items in the table. For each primary key, you must provide\n all of the key attributes. For example, with a simple\n primary key, you only need to provide the partition key value. For a composite\n key, you must provide both the partition key value and the\n sort key value.

    \n
  • \n
  • \n

    \n ProjectionExpression - A string that identifies one or more\n attributes to retrieve from the table. These attributes can include scalars,\n sets, or elements of a JSON document. The attributes in the expression must be\n separated by commas.

    \n

    If no attribute names are specified, then all attributes are returned. If any\n of the requested attributes are not found, they do not appear in the\n result.

    \n

    For more information, see Accessing Item Attributes in the Amazon DynamoDB\n Developer Guide.

    \n
  • \n
  • \n

    \n AttributesToGet - This is a legacy parameter. Use\n ProjectionExpression instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer\n Guide.

    \n
  • \n
", + "smithy.api#documentation": "

A map of one or more table names or table ARNs and, for each table, a map that\n describes one or more items to retrieve from that table. Each table name or ARN can be\n used only once per BatchGetItem request.

\n

Each element in the map of items to retrieve consists of the following:

\n
    \n
  • \n

    \n ConsistentRead - If true, a strongly consistent read\n is used; if false (the default), an eventually consistent read is\n used.

    \n
  • \n
  • \n

    \n ExpressionAttributeNames - One or more substitution tokens for\n attribute names in the ProjectionExpression parameter. The\n following are some use cases for using\n ExpressionAttributeNames:

    \n
      \n
    • \n

      To access an attribute whose name conflicts with a DynamoDB reserved\n word.

      \n
    • \n
    • \n

      To create a placeholder for repeating occurrences of an attribute name\n in an expression.

      \n
    • \n
    • \n

      To prevent special characters in an attribute name from being\n misinterpreted in an expression.

      \n
    • \n
    \n

    Use the # character in an expression to\n dereference an attribute name. For example, consider the following attribute\n name:

    \n
      \n
    • \n

      \n Percentile\n

      \n
    • \n
    \n

    The name of this attribute conflicts with a reserved word, so it cannot be\n used directly in an expression. (For the complete list of reserved words, see\n Reserved\n Words in the Amazon DynamoDB Developer Guide).\n To work around this, you could specify the following for\n ExpressionAttributeNames:

    \n
      \n
    • \n

      \n {\"#P\":\"Percentile\"}\n

      \n
    • \n
    \n

    You could then use this substitution in an expression, as in this\n example:

    \n
      \n
    • \n

      \n #P = :val\n

      \n
    • \n
    \n \n

    Tokens that begin with the : character\n are expression attribute values, which are placeholders\n for the actual value at runtime.

    \n
    \n

    For more information about expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB\n Developer Guide.

    \n
  • \n
  • \n

    \n Keys - An array of primary key attribute values that define\n specific items in the table. For each primary key, you must provide\n all of the key attributes. For example, with a simple\n primary key, you only need to provide the partition key value. For a composite\n key, you must provide both the partition key value and the\n sort key value.

    \n
  • \n
  • \n

    \n ProjectionExpression - A string that identifies one or more\n attributes to retrieve from the table. These attributes can include scalars,\n sets, or elements of a JSON document. The attributes in the expression must be\n separated by commas.

    \n

    If no attribute names are specified, then all attributes are returned. If any\n of the requested attributes are not found, they do not appear in the\n result.

    \n

    For more information, see Accessing Item Attributes in the Amazon DynamoDB\n Developer Guide.

    \n
  • \n
  • \n

    \n AttributesToGet - This is a legacy parameter. Use\n ProjectionExpression instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer\n Guide.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -938,7 +938,7 @@ "Responses": { "target": "com.amazonaws.dynamodb#BatchGetResponseMap", "traits": { - "smithy.api#documentation": "

A map of table name or table ARN to a list of items. Each object in Responses consists\n of a table name or ARN, along with a map of attribute data consisting of the data type and\n attribute value.

" + "smithy.api#documentation": "

A map of table name or table ARN to a list of items. Each object in\n Responses consists of a table name or ARN, along with a map of\n attribute data consisting of the data type and attribute value.

" } }, "UnprocessedKeys": { @@ -1171,7 +1171,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

The BatchWriteItem operation puts or deletes multiple items in one or\n more tables. A single call to BatchWriteItem can transmit up to 16MB of\n data over the network, consisting of up to 25 item put or delete operations. While\n individual items can be up to 400 KB once stored, it's important to note that an item's\n representation might be greater than 400KB while being sent in DynamoDB's JSON format\n for the API call. For more details on this distinction, see Naming Rules and Data Types.

\n \n

\n BatchWriteItem cannot update items. If you perform a BatchWriteItem\n operation on an existing item, that item's values will be overwritten by the\n operation and it will appear like it was updated. To update items, we recommend you\n use the UpdateItem action.

\n
\n

The individual PutItem and DeleteItem operations specified\n in BatchWriteItem are atomic; however BatchWriteItem as a\n whole is not. If any requested operations fail because the table's provisioned\n throughput is exceeded or an internal processing failure occurs, the failed operations\n are returned in the UnprocessedItems response parameter. You can\n investigate and optionally resend the requests. Typically, you would call\n BatchWriteItem in a loop. Each iteration would check for unprocessed\n items and submit a new BatchWriteItem request with those unprocessed items\n until all items have been processed.

\n

If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchWriteItem returns a\n ProvisionedThroughputExceededException.

\n \n

If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.

\n

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.

\n
\n

With BatchWriteItem, you can efficiently write or delete large amounts of\n data, such as from Amazon EMR, or copy data from another database into DynamoDB. In\n order to improve performance with these large-scale operations,\n BatchWriteItem does not behave in the same way as individual\n PutItem and DeleteItem calls would. For example, you\n cannot specify conditions on individual put and delete requests, and\n BatchWriteItem does not return deleted items in the response.

\n

If you use a programming language that supports concurrency, you can use threads to\n write items in parallel. Your application must include the necessary logic to manage the\n threads. With languages that don't support threading, you must update or delete the\n specified items one at a time. In both situations, BatchWriteItem performs\n the specified put and delete operations in parallel, giving you the power of the thread\n pool approach without having to introduce complexity into your application.

\n

Parallel processing reduces latency, but each specified put and delete request\n consumes the same number of write capacity units whether it is processed in parallel or\n not. Delete operations on nonexistent items consume one write capacity unit.

\n

If one or more of the following is true, DynamoDB rejects the entire batch write\n operation:

\n
    \n
  • \n

    One or more tables specified in the BatchWriteItem request does\n not exist.

    \n
  • \n
  • \n

    Primary key attributes specified on an item in the request do not match those\n in the corresponding table's primary key schema.

    \n
  • \n
  • \n

    You try to perform multiple operations on the same item in the same\n BatchWriteItem request. For example, you cannot put and delete\n the same item in the same BatchWriteItem request.

    \n
  • \n
  • \n

    Your request contains at least two items with identical hash and range keys\n (which essentially is two put operations).

    \n
  • \n
  • \n

    There are more than 25 requests in the batch.

    \n
  • \n
  • \n

    Any individual item in a batch exceeds 400 KB.

    \n
  • \n
  • \n

    The total request size exceeds 16 MB.

    \n
  • \n
  • \n

    Any individual items with keys exceeding the key length limits. For a\n partition key, the limit is 2048 bytes and for a sort key, the limit is 1024\n bytes.

    \n
  • \n
", + "smithy.api#documentation": "

The BatchWriteItem operation puts or deletes multiple items in one or\n more tables. A single call to BatchWriteItem can transmit up to 16MB of\n data over the network, consisting of up to 25 item put or delete operations. While\n individual items can be up to 400 KB once stored, it's important to note that an item's\n representation might be greater than 400KB while being sent in DynamoDB's JSON format\n for the API call. For more details on this distinction, see Naming Rules and Data Types.

\n \n

\n BatchWriteItem cannot update items. If you perform a\n BatchWriteItem operation on an existing item, that item's values\n will be overwritten by the operation and it will appear like it was updated. To\n update items, we recommend you use the UpdateItem action.

\n
\n

The individual PutItem and DeleteItem operations specified\n in BatchWriteItem are atomic; however BatchWriteItem as a\n whole is not. If any requested operations fail because the table's provisioned\n throughput is exceeded or an internal processing failure occurs, the failed operations\n are returned in the UnprocessedItems response parameter. You can\n investigate and optionally resend the requests. Typically, you would call\n BatchWriteItem in a loop. Each iteration would check for unprocessed\n items and submit a new BatchWriteItem request with those unprocessed items\n until all items have been processed.

\n

For tables and indexes with provisioned capacity, if none of the items can be\n processed due to insufficient provisioned throughput on all of the tables in the\n request, then BatchWriteItem returns a\n ProvisionedThroughputExceededException. For all tables and indexes, if\n none of the items can be processed due to other throttling scenarios (such as exceeding\n partition level limits), then BatchWriteItem returns a\n ThrottlingException.

\n \n

If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.

\n

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.

\n
\n

With BatchWriteItem, you can efficiently write or delete large amounts of\n data, such as from Amazon EMR, or copy data from another database into DynamoDB. In\n order to improve performance with these large-scale operations,\n BatchWriteItem does not behave in the same way as individual\n PutItem and DeleteItem calls would. For example, you\n cannot specify conditions on individual put and delete requests, and\n BatchWriteItem does not return deleted items in the response.

\n

If you use a programming language that supports concurrency, you can use threads to\n write items in parallel. Your application must include the necessary logic to manage the\n threads. With languages that don't support threading, you must update or delete the\n specified items one at a time. In both situations, BatchWriteItem performs\n the specified put and delete operations in parallel, giving you the power of the thread\n pool approach without having to introduce complexity into your application.

\n

Parallel processing reduces latency, but each specified put and delete request\n consumes the same number of write capacity units whether it is processed in parallel or\n not. Delete operations on nonexistent items consume one write capacity unit.

\n

If one or more of the following is true, DynamoDB rejects the entire batch write\n operation:

\n
    \n
  • \n

    One or more tables specified in the BatchWriteItem request does\n not exist.

    \n
  • \n
  • \n

    Primary key attributes specified on an item in the request do not match those\n in the corresponding table's primary key schema.

    \n
  • \n
  • \n

    You try to perform multiple operations on the same item in the same\n BatchWriteItem request. For example, you cannot put and delete\n the same item in the same BatchWriteItem request.

    \n
  • \n
  • \n

    Your request contains at least two items with identical hash and range keys\n (which essentially is two put operations).

    \n
  • \n
  • \n

    There are more than 25 requests in the batch.

    \n
  • \n
  • \n

    Any individual item in a batch exceeds 400 KB.

    \n
  • \n
  • \n

    The total request size exceeds 16 MB.

    \n
  • \n
  • \n

    Any individual items with keys exceeding the key length limits. For a\n partition key, the limit is 2048 bytes and for a sort key, the limit is 1024\n bytes.

    \n
  • \n
", "smithy.api#examples": [ { "title": "To add multiple items to a table", @@ -1238,7 +1238,7 @@ "RequestItems": { "target": "com.amazonaws.dynamodb#BatchWriteItemRequestMap", "traits": { - "smithy.api#documentation": "

A map of one or more table names or table ARNs and, for each table, a list of operations to be\n performed (DeleteRequest or PutRequest). Each element in the\n map consists of the following:

\n
    \n
  • \n

    \n DeleteRequest - Perform a DeleteItem operation on the\n specified item. The item to be deleted is identified by a Key\n subelement:

    \n
      \n
    • \n

      \n Key - A map of primary key attribute values that uniquely\n identify the item. Each entry in this map consists of an attribute name\n and an attribute value. For each primary key, you must provide\n all of the key attributes. For example, with a\n simple primary key, you only need to provide a value for the partition\n key. For a composite primary key, you must provide values for\n both the partition key and the sort key.

      \n
    • \n
    \n
  • \n
  • \n

    \n PutRequest - Perform a PutItem operation on the\n specified item. The item to be put is identified by an Item\n subelement:

    \n
      \n
    • \n

      \n Item - A map of attributes and their values. Each entry in\n this map consists of an attribute name and an attribute value. Attribute\n values must not be null; string and binary type attributes must have\n lengths greater than zero; and set type attributes must not be empty.\n Requests that contain empty values are rejected with a\n ValidationException exception.

      \n

      If you specify any attributes that are part of an index key, then the\n data types for those attributes must match those of the schema in the\n table's attribute definition.

      \n
    • \n
    \n
  • \n
", + "smithy.api#documentation": "

A map of one or more table names or table ARNs and, for each table, a list of\n operations to be performed (DeleteRequest or PutRequest). Each\n element in the map consists of the following:

\n
    \n
  • \n

    \n DeleteRequest - Perform a DeleteItem operation on the\n specified item. The item to be deleted is identified by a Key\n subelement:

    \n
      \n
    • \n

      \n Key - A map of primary key attribute values that uniquely\n identify the item. Each entry in this map consists of an attribute name\n and an attribute value. For each primary key, you must provide\n all of the key attributes. For example, with a\n simple primary key, you only need to provide a value for the partition\n key. For a composite primary key, you must provide values for\n both the partition key and the sort key.

      \n
    • \n
    \n
  • \n
  • \n

    \n PutRequest - Perform a PutItem operation on the\n specified item. The item to be put is identified by an Item\n subelement:

    \n
      \n
    • \n

      \n Item - A map of attributes and their values. Each entry in\n this map consists of an attribute name and an attribute value. Attribute\n values must not be null; string and binary type attributes must have\n lengths greater than zero; and set type attributes must not be empty.\n Requests that contain empty values are rejected with a\n ValidationException exception.

      \n

      If you specify any attributes that are part of an index key, then the\n data types for those attributes must match those of the schema in the\n table's attribute definition.

      \n
    • \n
    \n
  • \n
", "smithy.api#required": {} } }, @@ -1263,7 +1263,7 @@ "UnprocessedItems": { "target": "com.amazonaws.dynamodb#BatchWriteItemRequestMap", "traits": { - "smithy.api#documentation": "

A map of tables and requests against those tables that were not processed. The\n UnprocessedItems value is in the same form as\n RequestItems, so you can provide this value directly to a subsequent\n BatchWriteItem operation. For more information, see\n RequestItems in the Request Parameters section.

\n

Each UnprocessedItems entry consists of a table name or table ARN and, for that table,\n a list of operations to perform (DeleteRequest or\n PutRequest).

\n
    \n
  • \n

    \n DeleteRequest - Perform a DeleteItem operation on the\n specified item. The item to be deleted is identified by a Key\n subelement:

    \n
      \n
    • \n

      \n Key - A map of primary key attribute values that uniquely\n identify the item. Each entry in this map consists of an attribute name\n and an attribute value.

      \n
    • \n
    \n
  • \n
  • \n

    \n PutRequest - Perform a PutItem operation on the\n specified item. The item to be put is identified by an Item\n subelement:

    \n
      \n
    • \n

      \n Item - A map of attributes and their values. Each entry in\n this map consists of an attribute name and an attribute value. Attribute\n values must not be null; string and binary type attributes must have\n lengths greater than zero; and set type attributes must not be empty.\n Requests that contain empty values will be rejected with a\n ValidationException exception.

      \n

      If you specify any attributes that are part of an index key, then the\n data types for those attributes must match those of the schema in the\n table's attribute definition.

      \n
    • \n
    \n
  • \n
\n

If there are no unprocessed items remaining, the response contains an empty\n UnprocessedItems map.

" + "smithy.api#documentation": "

A map of tables and requests against those tables that were not processed. The\n UnprocessedItems value is in the same form as\n RequestItems, so you can provide this value directly to a subsequent\n BatchWriteItem operation. For more information, see\n RequestItems in the Request Parameters section.

\n

Each UnprocessedItems entry consists of a table name or table ARN\n and, for that table, a list of operations to perform (DeleteRequest or\n PutRequest).

\n
    \n
  • \n

    \n DeleteRequest - Perform a DeleteItem operation on the\n specified item. The item to be deleted is identified by a Key\n subelement:

    \n
      \n
    • \n

      \n Key - A map of primary key attribute values that uniquely\n identify the item. Each entry in this map consists of an attribute name\n and an attribute value.

      \n
    • \n
    \n
  • \n
  • \n

    \n PutRequest - Perform a PutItem operation on the\n specified item. The item to be put is identified by an Item\n subelement:

    \n
      \n
    • \n

      \n Item - A map of attributes and their values. Each entry in\n this map consists of an attribute name and an attribute value. Attribute\n values must not be null; string and binary type attributes must have\n lengths greater than zero; and set type attributes must not be empty.\n Requests that contain empty values will be rejected with a\n ValidationException exception.

      \n

      If you specify any attributes that are part of an index key, then the\n data types for those attributes must match those of the schema in the\n table's attribute definition.

      \n
    • \n
    \n
  • \n
\n

If there are no unprocessed items remaining, the response contains an empty\n UnprocessedItems map.

" } }, "ItemCollectionMetrics": { @@ -1900,7 +1900,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this\n parameter.

", "smithy.api#required": {} } }, @@ -2141,7 +2141,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table to create. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table to create. You can also provide the Amazon Resource Name (ARN) of the table in\n this parameter.

", "smithy.api#required": {} } }, @@ -2203,19 +2203,19 @@ "DeletionProtectionEnabled": { "target": "com.amazonaws.dynamodb#DeletionProtectionEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table.

" + "smithy.api#documentation": "

Indicates whether deletion protection is to be enabled (true) or disabled (false) on\n the table.

" } }, "ResourcePolicy": { "target": "com.amazonaws.dynamodb#ResourcePolicy", "traits": { - "smithy.api#documentation": "

An Amazon Web Services resource-based policy document in JSON format that will be attached to the table.

\n

When you attach a resource-based policy while creating a table, the policy application is strongly consistent.

\n

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that apply for resource-based policies, see Resource-based policy considerations.

\n \n

You need to specify the CreateTable and PutResourcePolicy IAM actions for authorizing a user to create a table with a resource-based policy.

\n
" + "smithy.api#documentation": "

An Amazon Web Services resource-based policy document in JSON format that will be\n attached to the table.

\n

When you attach a resource-based policy while creating a table, the policy application\n is strongly consistent.

\n

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this\n limit. For a full list of all considerations that apply for resource-based policies, see\n Resource-based\n policy considerations.

\n \n

You need to specify the CreateTable and\n PutResourcePolicy\n IAM actions for authorizing a user to create a table with a\n resource-based policy.

\n
" } }, "OnDemandThroughput": { "target": "com.amazonaws.dynamodb#OnDemandThroughput", "traits": { - "smithy.api#documentation": "

Sets the maximum number of read and write units for the specified table in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" + "smithy.api#documentation": "

Sets the maximum number of read and write units for the specified table in on-demand\n capacity mode. If you use this parameter, you must specify\n MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" } } }, @@ -2486,7 +2486,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table from which to delete the item. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table from which to delete the item. You can also provide the\n Amazon Resource Name (ARN) of the table in this parameter.

", "smithy.api#required": {} } }, @@ -2658,7 +2658,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Deletes the resource-based policy attached to the resource, which can be a table or stream.

\n

\n DeleteResourcePolicy is an idempotent operation; running it multiple times on the same resource doesn't result in an error response, unless you specify an ExpectedRevisionId, which will then return a PolicyNotFoundException.

\n \n

To make sure that you don't inadvertently lock yourself out of your own resources, the root principal in your Amazon Web Services account can perform DeleteResourcePolicy requests, even if your resource-based policy explicitly denies the root principal's access.\n

\n
\n \n

\n DeleteResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy request immediately after running the DeleteResourcePolicy request, DynamoDB might still return the deleted policy. This is because the policy for your resource might not have been deleted yet. Wait for a few seconds, and then try the GetResourcePolicy request again.

\n
" + "smithy.api#documentation": "

Deletes the resource-based policy attached to the resource, which can be a table or\n stream.

\n

\n DeleteResourcePolicy is an idempotent operation; running it multiple\n times on the same resource doesn't result in an error response,\n unless you specify an ExpectedRevisionId, which will then return a\n PolicyNotFoundException.

\n \n

To make sure that you don't inadvertently lock yourself out of your own resources,\n the root principal in your Amazon Web Services account can perform\n DeleteResourcePolicy requests, even if your resource-based policy\n explicitly denies the root principal's access.

\n
\n \n

\n DeleteResourcePolicy is an asynchronous operation. If you issue a\n GetResourcePolicy request immediately after running the\n DeleteResourcePolicy request, DynamoDB might still return\n the deleted policy. This is because the policy for your resource might not have been\n deleted yet. Wait for a few seconds, and then try the GetResourcePolicy\n request again.

\n
" } }, "com.amazonaws.dynamodb#DeleteResourcePolicyInput": { @@ -2667,14 +2667,14 @@ "ResourceArn": { "target": "com.amazonaws.dynamodb#ResourceArnString", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the DynamoDB resource from which the policy will be removed. The resources you can specify include tables and streams. If you remove the policy of a table, it will also remove the permissions for the table's indexes defined in that policy document. This is because index permissions are defined in the table's policy.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the DynamoDB resource from which the policy will be\n removed. The resources you can specify include tables and streams. If you remove the\n policy of a table, it will also remove the permissions for the table's indexes defined\n in that policy document. This is because index permissions are defined in the table's\n policy.

", "smithy.api#required": {} } }, "ExpectedRevisionId": { "target": "com.amazonaws.dynamodb#PolicyRevisionId", "traits": { - "smithy.api#documentation": "

A string value that you can use to conditionally delete your policy. When you provide an expected revision ID, if the revision ID of the existing policy on the resource doesn't match or if there's no policy attached to the resource, the request will fail and return a PolicyNotFoundException.

" + "smithy.api#documentation": "

A string value that you can use to conditionally delete your policy. When you provide\n an expected revision ID, if the revision ID of the existing policy on the resource\n doesn't match or if there's no policy attached to the resource, the request will fail\n and return a PolicyNotFoundException.

" } } }, @@ -2688,7 +2688,7 @@ "RevisionId": { "target": "com.amazonaws.dynamodb#PolicyRevisionId", "traits": { - "smithy.api#documentation": "

A unique string that represents the revision ID of the policy. If you're comparing revision IDs, make sure to always use string comparison logic.

\n

This value will be empty if you make a request against a resource without a policy.

" + "smithy.api#documentation": "

A unique string that represents the revision ID of the policy. If you're comparing revision IDs, make sure to always use string comparison logic.

\n

This value will be empty if you make a request against a resource without a\n policy.

" } } }, @@ -2725,7 +2725,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

The DeleteTable operation deletes a table and all of its items. After a\n DeleteTable request, the specified table is in the\n DELETING state until DynamoDB completes the deletion. If the table is\n in the ACTIVE state, you can delete it. If a table is in\n CREATING or UPDATING states, then DynamoDB returns a\n ResourceInUseException. If the specified table does not exist, DynamoDB\n returns a ResourceNotFoundException. If table is already in the\n DELETING state, no error is returned.

\n \n

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n

\n
\n \n

DynamoDB might continue to accept data read and write operations, such as\n GetItem and PutItem, on a table in the\n DELETING state until the table deletion is complete.

\n
\n

When you delete a table, any indexes on that table are also deleted.

\n

If you have DynamoDB Streams enabled on the table, then the corresponding stream on\n that table goes into the DISABLED state, and the stream is automatically\n deleted after 24 hours.

\n

Use the DescribeTable action to check the status of the table.

", + "smithy.api#documentation": "

The DeleteTable operation deletes a table and all of its items. After a\n DeleteTable request, the specified table is in the\n DELETING state until DynamoDB completes the deletion. If the table is\n in the ACTIVE state, you can delete it. If a table is in\n CREATING or UPDATING states, then DynamoDB returns a\n ResourceInUseException. If the specified table does not exist, DynamoDB\n returns a ResourceNotFoundException. If table is already in the\n DELETING state, no error is returned.

\n \n

For global tables, this operation only applies to\n global tables using Version 2019.11.21 (Current version).

\n
\n \n

DynamoDB might continue to accept data read and write operations, such as\n GetItem and PutItem, on a table in the\n DELETING state until the table deletion is complete. For the full\n list of table states, see TableStatus.

\n
\n

When you delete a table, any indexes on that table are also deleted.

\n

If you have DynamoDB Streams enabled on the table, then the corresponding stream on\n that table goes into the DISABLED state, and the stream is automatically\n deleted after 24 hours.

\n

Use the DescribeTable action to check the status of the table.

", "smithy.api#examples": [ { "title": "To delete a table", @@ -2756,7 +2756,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table to delete. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table to delete. You can also provide the Amazon Resource Name (ARN) of the table in\n this parameter.

", "smithy.api#required": {} } } @@ -2920,7 +2920,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table to describe. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table to describe. You can also provide the Amazon Resource Name (ARN) of the table in\n this parameter.

", "smithy.api#required": {} } }, @@ -2988,7 +2988,7 @@ "target": "com.amazonaws.dynamodb#DescribeEndpointsResponse" }, "traits": { - "smithy.api#documentation": "

Returns the regional endpoint information. For more information \n on policy permissions, please see Internetwork traffic privacy.

" + "smithy.api#documentation": "

Returns the regional endpoint information. For more information on policy permissions,\n please see Internetwork traffic privacy.

" } }, "com.amazonaws.dynamodb#DescribeEndpointsRequest": { @@ -3260,7 +3260,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table being described. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table being described. You can also provide the Amazon Resource Name (ARN) of the table\n in this parameter.

", "smithy.api#required": {} } } @@ -3388,7 +3388,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Returns information about the table, including the current status of the table, when\n it was created, the primary key schema, and any indexes on the table.

\n \n

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n

\n
\n \n

If you issue a DescribeTable request immediately after a\n CreateTable request, DynamoDB might return a\n ResourceNotFoundException. This is because\n DescribeTable uses an eventually consistent query, and the metadata\n for your table might not be available at that moment. Wait for a few seconds, and\n then try the DescribeTable request again.

\n
", + "smithy.api#documentation": "

Returns information about the table, including the current status of the table, when\n it was created, the primary key schema, and any indexes on the table.

\n \n

For global tables, this operation only applies to global tables using Version\n 2019.11.21 (Current version).

\n
\n \n

If you issue a DescribeTable request immediately after a\n CreateTable request, DynamoDB might return a\n ResourceNotFoundException. This is because\n DescribeTable uses an eventually consistent query, and the metadata\n for your table might not be available at that moment. Wait for a few seconds, and\n then try the DescribeTable request again.

\n
", "smithy.waiters#waitable": { "TableExists": { "acceptors": [ @@ -3431,7 +3431,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table to describe. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table to describe. You can also provide the Amazon Resource Name (ARN) of the table in\n this parameter.

", "smithy.api#required": {} } } @@ -3473,7 +3473,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes auto scaling settings across replicas of the global table at once.

\n \n

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

\n
" + "smithy.api#documentation": "

Describes auto scaling settings across replicas of the global table at once.

\n \n

For global tables, this operation only applies to global tables using Version\n 2019.11.21 (Current version).

\n
" } }, "com.amazonaws.dynamodb#DescribeTableReplicaAutoScalingInput": { @@ -3482,7 +3482,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this\n parameter.

", "smithy.api#required": {} } } @@ -3537,7 +3537,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table to be described. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table to be described. You can also provide the Amazon Resource Name (ARN) of the table\n in this parameter.

", "smithy.api#required": {} } } @@ -3630,7 +3630,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Stops replication from the DynamoDB table to the Kinesis data stream. This is done\n without deleting either of the resources.

" + "smithy.api#documentation": "

Stops replication from the DynamoDB table to the Kinesis data stream. This\n is done without deleting either of the resources.

" } }, "com.amazonaws.dynamodb#DoubleObject": { @@ -5057,7 +5057,7 @@ } ], "traits": { - "smithy.api#documentation": "

This operation allows you to perform reads and singleton writes on data stored in\n DynamoDB, using PartiQL.

\n

For PartiQL reads (SELECT statement), if the total number of processed\n items exceeds the maximum dataset size limit of 1 MB, the read stops and results are\n returned to the user as a LastEvaluatedKey value to continue the read in a\n subsequent operation. If the filter criteria in WHERE clause does not match\n any data, the read will return an empty result set.

\n

A single SELECT statement response can return up to the maximum number of\n items (if using the Limit parameter) or a maximum of 1 MB of data (and then apply any\n filtering to the results using WHERE clause). If\n LastEvaluatedKey is present in the response, you need to paginate the\n result set. If NextToken is present, you need to paginate the result set and include \n NextToken.

" + "smithy.api#documentation": "

This operation allows you to perform reads and singleton writes on data stored in\n DynamoDB, using PartiQL.

\n

For PartiQL reads (SELECT statement), if the total number of processed\n items exceeds the maximum dataset size limit of 1 MB, the read stops and results are\n returned to the user as a LastEvaluatedKey value to continue the read in a\n subsequent operation. If the filter criteria in WHERE clause does not match\n any data, the read will return an empty result set.

\n

A single SELECT statement response can return up to the maximum number of\n items (if using the Limit parameter) or a maximum of 1 MB of data (and then apply any\n filtering to the results using WHERE clause). If\n LastEvaluatedKey is present in the response, you need to paginate the\n result set. If NextToken is present, you need to paginate the result set\n and include NextToken.

" } }, "com.amazonaws.dynamodb#ExecuteStatementInput": { @@ -5100,7 +5100,7 @@ "ReturnValuesOnConditionCheckFailure": { "target": "com.amazonaws.dynamodb#ReturnValuesOnConditionCheckFailure", "traits": { - "smithy.api#documentation": "

An optional parameter that returns the item attributes for an\n ExecuteStatement operation that failed a condition check.

\n

There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.

" + "smithy.api#documentation": "

An optional parameter that returns the item attributes for an\n ExecuteStatement operation that failed a condition check.

\n

There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.

" } } }, @@ -5580,7 +5580,7 @@ "S3BucketOwner": { "target": "com.amazonaws.dynamodb#S3BucketOwner", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Web Services account that owns the bucket the export will be\n stored in.

\n \n

S3BucketOwner is a required parameter when exporting to a S3 bucket in another account.

\n
" + "smithy.api#documentation": "

The ID of the Amazon Web Services account that owns the bucket the export will be\n stored in.

\n \n

S3BucketOwner is a required parameter when exporting to a S3 bucket in another\n account.

\n
" } }, "S3Prefix": { @@ -5610,7 +5610,7 @@ "ExportType": { "target": "com.amazonaws.dynamodb#ExportType", "traits": { - "smithy.api#documentation": "

Choice of whether to execute as a full export or incremental export. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. The default value is FULL_EXPORT. If INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must also be used.

" + "smithy.api#documentation": "

Choice of whether to execute as a full export or incremental export. Valid values are\n FULL_EXPORT or INCREMENTAL_EXPORT. The default value is FULL_EXPORT. If\n INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must also be\n used.

" } }, "IncrementalExportSpecification": { @@ -5839,7 +5839,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table containing the requested item. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table containing the requested item. You can also provide the\n Amazon Resource Name (ARN) of the table in this parameter.

", "smithy.api#required": {} } }, @@ -5895,7 +5895,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "

The capacity units consumed by the GetItem operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

The capacity units consumed by the GetItem operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon\n DynamoDB Developer Guide.

" } } }, @@ -5930,7 +5930,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Returns the resource-based policy document attached to the resource, which can be a table or stream, in JSON format.

\n

\n GetResourcePolicy follows an \n eventually consistent\n model. The following list describes the outcomes when you issue the GetResourcePolicy request immediately after issuing another request:

\n
    \n
  • \n

    If you issue a GetResourcePolicy request immediately after a PutResourcePolicy request, DynamoDB might return a PolicyNotFoundException.

    \n
  • \n
  • \n

    If you issue a GetResourcePolicyrequest immediately after a DeleteResourcePolicy request, DynamoDB might return the policy that was present before the deletion request.

    \n
  • \n
  • \n

    If you issue a GetResourcePolicy request immediately after a CreateTable request, which includes a resource-based policy, DynamoDB might return a ResourceNotFoundException or a PolicyNotFoundException.

    \n
  • \n
\n

Because GetResourcePolicy uses an eventually consistent query, the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then retry the GetResourcePolicy request.

\n

After a GetResourcePolicy request returns a policy created using the PutResourcePolicy request, the policy will be applied in the authorization of requests to the resource. Because this process is eventually consistent, it will take some time to apply the policy to all requests to a resource. Policies that you attach while creating a table using the CreateTable request will always be applied to all requests for that table.

" + "smithy.api#documentation": "

Returns the resource-based policy document attached to the resource, which can be a\n table or stream, in JSON format.

\n

\n GetResourcePolicy follows an \n eventually consistent\n model. The following list\n describes the outcomes when you issue the GetResourcePolicy request\n immediately after issuing another request:

\n
    \n
  • \n

    If you issue a GetResourcePolicy request immediately after a\n PutResourcePolicy request, DynamoDB might return a\n PolicyNotFoundException.

    \n
  • \n
  • \n

    If you issue a GetResourcePolicyrequest immediately after a\n DeleteResourcePolicy request, DynamoDB might return\n the policy that was present before the deletion request.

    \n
  • \n
  • \n

    If you issue a GetResourcePolicy request immediately after a\n CreateTable request, which includes a resource-based policy,\n DynamoDB might return a ResourceNotFoundException or\n a PolicyNotFoundException.

    \n
  • \n
\n

Because GetResourcePolicy uses an eventually\n consistent query, the metadata for your policy or table might not be\n available at that moment. Wait for a few seconds, and then retry the\n GetResourcePolicy request.

\n

After a GetResourcePolicy request returns a policy created using the\n PutResourcePolicy request, the policy will be applied in the\n authorization of requests to the resource. Because this process is eventually\n consistent, it will take some time to apply the policy to all requests to a resource.\n Policies that you attach while creating a table using the CreateTable\n request will always be applied to all requests for that table.

" } }, "com.amazonaws.dynamodb#GetResourcePolicyInput": { @@ -5939,7 +5939,7 @@ "ResourceArn": { "target": "com.amazonaws.dynamodb#ResourceArnString", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy is attached. The resources you can specify include tables and streams.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy is attached. The\n resources you can specify include tables and streams.

", "smithy.api#required": {} } } @@ -5954,7 +5954,7 @@ "Policy": { "target": "com.amazonaws.dynamodb#ResourcePolicy", "traits": { - "smithy.api#documentation": "

The resource-based policy document attached to the resource, which can be a table or stream, in JSON format.

" + "smithy.api#documentation": "

The resource-based policy document attached to the resource, which can be a table or\n stream, in JSON format.

" } }, "RevisionId": { @@ -7214,7 +7214,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the DynamoDB table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the DynamoDB table. You can also provide the Amazon Resource Name (ARN) of the\n table in this parameter.

", "smithy.api#required": {} } }, @@ -7306,7 +7306,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

List DynamoDB backups that are associated with an Amazon Web Services account and weren't made with Amazon Web Services Backup. \n To list these backups for a given table, specify TableName. ListBackups returns a\n paginated list of results with at most 1 MB worth of items in a page. You can also\n specify a maximum number of entries to be returned in a page.

\n

In the request, start time is inclusive, but end time is exclusive. Note that these\n boundaries are for the time at which the original backup was requested.

\n

You can call ListBackups a maximum of five times per second.

\n

If you want to retrieve the complete list of backups made with Amazon Web Services Backup, use the \n Amazon Web Services Backup list API.\n

" + "smithy.api#documentation": "

List DynamoDB backups that are associated with an Amazon Web Services account and\n weren't made with Amazon Web Services Backup. To list these backups for a given table,\n specify TableName. ListBackups returns a paginated list of\n results with at most 1 MB worth of items in a page. You can also specify a maximum\n number of entries to be returned in a page.

\n

In the request, start time is inclusive, but end time is exclusive. Note that these\n boundaries are for the time at which the original backup was requested.

\n

You can call ListBackups a maximum of five times per second.

\n

If you want to retrieve the complete list of backups made with Amazon Web Services\n Backup, use the Amazon Web Services Backup\n list API.\n

" } }, "com.amazonaws.dynamodb#ListBackupsInput": { @@ -7315,7 +7315,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

Lists the backups from the table specified in TableName. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

" + "smithy.api#documentation": "

Lists the backups from the table specified in TableName. You can also\n provide the Amazon Resource Name (ARN) of the table in this parameter.

" } }, "Limit": { @@ -7404,7 +7404,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

" + "smithy.api#documentation": "

The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this\n parameter.

" } }, "NextToken": { @@ -8490,7 +8490,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Creates a new item, or replaces an old item with a new item. If an item that has the\n same primary key as the new item already exists in the specified table, the new item\n completely replaces the existing item. You can perform a conditional put operation (add\n a new item if one with the specified primary key doesn't exist), or replace an existing\n item if it has certain attribute values. You can return the item's attribute values in\n the same operation, using the ReturnValues parameter.

\n

When you add an item, the primary key attributes are the only required attributes.\n

\n

Empty String and Binary attribute values are allowed. Attribute values of type String\n and Binary must have a length greater than zero if the attribute is used as a key\n attribute for a table or index. Set type attributes cannot be empty.

\n

Invalid Requests with empty values will be rejected with a\n ValidationException exception.

\n \n

To prevent a new item from replacing an existing item, use a conditional\n expression that contains the attribute_not_exists function with the\n name of the attribute being used as the partition key for the table. Since every\n record must contain that attribute, the attribute_not_exists function\n will only succeed if no matching item exists.

\n
\n

For more information about PutItem, see Working with\n Items in the Amazon DynamoDB Developer Guide.

", + "smithy.api#documentation": "

Creates a new item, or replaces an old item with a new item. If an item that has the\n same primary key as the new item already exists in the specified table, the new item\n completely replaces the existing item. You can perform a conditional put operation (add\n a new item if one with the specified primary key doesn't exist), or replace an existing\n item if it has certain attribute values. You can return the item's attribute values in\n the same operation, using the ReturnValues parameter.

\n

When you add an item, the primary key attributes are the only required attributes.

\n

Empty String and Binary attribute values are allowed. Attribute values of type String\n and Binary must have a length greater than zero if the attribute is used as a key\n attribute for a table or index. Set type attributes cannot be empty.

\n

Invalid Requests with empty values will be rejected with a\n ValidationException exception.

\n \n

To prevent a new item from replacing an existing item, use a conditional\n expression that contains the attribute_not_exists function with the\n name of the attribute being used as the partition key for the table. Since every\n record must contain that attribute, the attribute_not_exists function\n will only succeed if no matching item exists.

\n
\n

For more information about PutItem, see Working with\n Items in the Amazon DynamoDB Developer Guide.

", "smithy.api#examples": [ { "title": "To add an item to a table", @@ -8526,7 +8526,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table to contain the item. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table to contain the item. You can also provide the Amazon Resource Name (ARN) of the\n table in this parameter.

", "smithy.api#required": {} } }, @@ -8615,7 +8615,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "

The capacity units consumed by the PutItem operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Capacity unity consumption for write operations in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

The capacity units consumed by the PutItem operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Capacity unity consumption for write operations in the Amazon\n DynamoDB Developer Guide.

" } }, "ItemCollectionMetrics": { @@ -8677,7 +8677,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Attaches a resource-based policy document to the resource, which can be a table or stream. When you attach a resource-based policy using this API, the policy application is \n eventually consistent\n .

\n

\n PutResourcePolicy is an idempotent operation; running it multiple times on the same resource using the same policy document will return the same revision ID. If you specify an ExpectedRevisionId that doesn't match the current policy's RevisionId, the PolicyNotFoundException will be returned.

\n \n

\n PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy request immediately after a PutResourcePolicy request, DynamoDB might return your previous policy, if there was one, or return the PolicyNotFoundException. This is because GetResourcePolicy uses an eventually consistent query, and the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then try the GetResourcePolicy request again.

\n
" + "smithy.api#documentation": "

Attaches a resource-based policy document to the resource, which can be a table or\n stream. When you attach a resource-based policy using this API, the policy application\n is \n eventually consistent\n .

\n

\n PutResourcePolicy is an idempotent operation; running it multiple times\n on the same resource using the same policy document will return the same revision ID. If\n you specify an ExpectedRevisionId that doesn't match the current policy's\n RevisionId, the PolicyNotFoundException will be\n returned.

\n \n

\n PutResourcePolicy is an asynchronous operation. If you issue a\n GetResourcePolicy request immediately after a\n PutResourcePolicy request, DynamoDB might return your\n previous policy, if there was one, or return the\n PolicyNotFoundException. This is because\n GetResourcePolicy uses an eventually consistent query, and the\n metadata for your policy or table might not be available at that moment. Wait for a\n few seconds, and then try the GetResourcePolicy request again.

\n
" } }, "com.amazonaws.dynamodb#PutResourcePolicyInput": { @@ -8686,28 +8686,28 @@ "ResourceArn": { "target": "com.amazonaws.dynamodb#ResourceArnString", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy will be attached. The resources you can specify include tables and streams.

\n

You can control index permissions using the base table's policy. To specify the same permission level for your table and its indexes, you can provide both the table and index Amazon Resource Name (ARN)s in the Resource field of a given Statement in your policy document. Alternatively, to specify different permissions for your table, indexes, or both, you can define multiple Statement fields in your policy document.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy will be attached.\n The resources you can specify include tables and streams.

\n

You can control index permissions using the base table's policy. To specify the same permission level for your table and its indexes, you can provide both the table and index Amazon Resource Name (ARN)s in the Resource field of a given Statement in your policy document. Alternatively, to specify different permissions for your table, indexes, or both, you can define multiple Statement fields in your policy document.

", "smithy.api#required": {} } }, "Policy": { "target": "com.amazonaws.dynamodb#ResourcePolicy", "traits": { - "smithy.api#documentation": "

An Amazon Web Services resource-based policy document in JSON format.

\n
    \n
  • \n

    The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit.

    \n
  • \n
  • \n

    Within a resource-based policy, if the action for a DynamoDB service-linked role (SLR) to replicate data for a global table is denied, adding or deleting a replica will fail with an error.

    \n
  • \n
\n

For a full list of all considerations that apply while attaching a resource-based policy, see Resource-based policy considerations.

", + "smithy.api#documentation": "

An Amazon Web Services resource-based policy document in JSON format.

\n
    \n
  • \n

    The maximum size supported for a resource-based policy document is 20 KB.\n DynamoDB counts whitespaces when calculating the size of a policy\n against this limit.

    \n
  • \n
  • \n

    Within a resource-based policy, if the action for a DynamoDB\n service-linked role (SLR) to replicate data for a global table is denied, adding\n or deleting a replica will fail with an error.

    \n
  • \n
\n

For a full list of all considerations that apply while attaching a resource-based\n policy, see Resource-based\n policy considerations.

", "smithy.api#required": {} } }, "ExpectedRevisionId": { "target": "com.amazonaws.dynamodb#PolicyRevisionId", "traits": { - "smithy.api#documentation": "

A string value that you can use to conditionally update your policy. You can provide the revision ID of your existing policy to make mutating requests against that policy.

\n \n

When you provide an expected revision ID, if the revision ID of the existing policy on the resource doesn't match or if there's no policy attached to the resource, your request will be rejected with a PolicyNotFoundException.

\n
\n

To conditionally attach a policy when no policy exists for the resource, specify NO_POLICY for the revision ID.

" + "smithy.api#documentation": "

A string value that you can use to conditionally update your policy. You can provide\n the revision ID of your existing policy to make mutating requests against that\n policy.

\n \n

When you provide an expected revision ID, if the revision ID of the existing\n policy on the resource doesn't match or if there's no policy attached to the\n resource, your request will be rejected with a\n PolicyNotFoundException.

\n
\n

To conditionally attach a policy when no policy exists for the resource, specify\n NO_POLICY for the revision ID.

" } }, "ConfirmRemoveSelfResourceAccess": { "target": "com.amazonaws.dynamodb#ConfirmRemoveSelfResourceAccess", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Set this parameter to true to confirm that you want to remove your permissions to change the policy of this resource in the future.

", + "smithy.api#documentation": "

Set this parameter to true to confirm that you want to remove your\n permissions to change the policy of this resource in the future.

", "smithy.api#httpHeader": "x-amz-confirm-remove-self-resource-access" } } @@ -8805,7 +8805,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table containing the requested items. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table containing the requested items. You can also provide the\n Amazon Resource Name (ARN) of the table in this parameter.

", "smithy.api#required": {} } }, @@ -8818,7 +8818,7 @@ "Select": { "target": "com.amazonaws.dynamodb#Select", "traits": { - "smithy.api#documentation": "

The attributes to be returned in the result. You can retrieve all item attributes,\n specific item attributes, the count of matching items, or in the case of an index, some\n or all of the attributes projected into the index.

\n
    \n
  • \n

    \n ALL_ATTRIBUTES - Returns all of the item attributes from the\n specified table or index. If you query a local secondary index, then for each\n matching item in the index, DynamoDB fetches the entire item from the parent\n table. If the index is configured to project all item attributes, then all of\n the data can be obtained from the local secondary index, and no fetching is\n required.

    \n
  • \n
  • \n

    \n ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index.\n Retrieves all attributes that have been projected into the index. If the index\n is configured to project all attributes, this return value is equivalent to\n specifying ALL_ATTRIBUTES.

    \n
  • \n
  • \n

    \n COUNT - Returns the number of matching items, rather than the\n matching items themselves. Note that this uses the same quantity of read capacity units \n as getting the items, and is subject to the same item size calculations.

    \n
  • \n
  • \n

    \n SPECIFIC_ATTRIBUTES - Returns only the attributes listed in\n ProjectionExpression. This return value is equivalent to\n specifying ProjectionExpression without specifying any value for\n Select.

    \n

    If you query or scan a local secondary index and request only attributes that\n are projected into that index, the operation will read only the index and not\n the table. If any of the requested attributes are not projected into the local\n secondary index, DynamoDB fetches each of these attributes from the parent\n table. This extra fetching incurs additional throughput cost and latency.

    \n

    If you query or scan a global secondary index, you can only request attributes\n that are projected into the index. Global secondary index queries cannot fetch\n attributes from the parent table.

    \n
  • \n
\n

If neither Select nor ProjectionExpression are specified,\n DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and\n ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both\n Select and ProjectionExpression together in a single\n request, unless the value for Select is SPECIFIC_ATTRIBUTES.\n (This usage is equivalent to specifying ProjectionExpression without any\n value for Select.)

\n \n

If you use the ProjectionExpression parameter, then the value for\n Select can only be SPECIFIC_ATTRIBUTES. Any other\n value for Select will return an error.

\n
" + "smithy.api#documentation": "

The attributes to be returned in the result. You can retrieve all item attributes,\n specific item attributes, the count of matching items, or in the case of an index, some\n or all of the attributes projected into the index.

\n
    \n
  • \n

    \n ALL_ATTRIBUTES - Returns all of the item attributes from the\n specified table or index. If you query a local secondary index, then for each\n matching item in the index, DynamoDB fetches the entire item from the parent\n table. If the index is configured to project all item attributes, then all of\n the data can be obtained from the local secondary index, and no fetching is\n required.

    \n
  • \n
  • \n

    \n ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index.\n Retrieves all attributes that have been projected into the index. If the index\n is configured to project all attributes, this return value is equivalent to\n specifying ALL_ATTRIBUTES.

    \n
  • \n
  • \n

    \n COUNT - Returns the number of matching items, rather than the\n matching items themselves. Note that this uses the same quantity of read\n capacity units as getting the items, and is subject to the same item size\n calculations.

    \n
  • \n
  • \n

    \n SPECIFIC_ATTRIBUTES - Returns only the attributes listed in\n ProjectionExpression. This return value is equivalent to\n specifying ProjectionExpression without specifying any value for\n Select.

    \n

    If you query or scan a local secondary index and request only attributes that\n are projected into that index, the operation will read only the index and not\n the table. If any of the requested attributes are not projected into the local\n secondary index, DynamoDB fetches each of these attributes from the parent\n table. This extra fetching incurs additional throughput cost and latency.

    \n

    If you query or scan a global secondary index, you can only request attributes\n that are projected into the index. Global secondary index queries cannot fetch\n attributes from the parent table.

    \n
  • \n
\n

If neither Select nor ProjectionExpression are specified,\n DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and\n ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both\n Select and ProjectionExpression together in a single\n request, unless the value for Select is SPECIFIC_ATTRIBUTES.\n (This usage is equivalent to specifying ProjectionExpression without any\n value for Select.)

\n \n

If you use the ProjectionExpression parameter, then the value for\n Select can only be SPECIFIC_ATTRIBUTES. Any other\n value for Select will return an error.

\n
" } }, "AttributesToGet": { @@ -8881,7 +8881,7 @@ "FilterExpression": { "target": "com.amazonaws.dynamodb#ConditionExpression", "traits": { - "smithy.api#documentation": "

A string that contains conditions that DynamoDB applies after the Query\n operation, but before the data is returned to you. Items that do not satisfy the\n FilterExpression criteria are not returned.

\n

A FilterExpression does not allow key attributes. You cannot define a\n filter expression based on a partition key or a sort key.

\n \n

A FilterExpression is applied after the items have already been read;\n the process of filtering does not consume any additional read capacity units.

\n
\n

For more information, see Filter Expressions in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

A string that contains conditions that DynamoDB applies after the Query\n operation, but before the data is returned to you. Items that do not satisfy the\n FilterExpression criteria are not returned.

\n

A FilterExpression does not allow key attributes. You cannot define a\n filter expression based on a partition key or a sort key.

\n \n

A FilterExpression is applied after the items have already been read;\n the process of filtering does not consume any additional read capacity units.

\n
\n

For more information, see Filter\n Expressions in the Amazon DynamoDB Developer\n Guide.

" } }, "KeyConditionExpression": { @@ -8928,7 +8928,7 @@ "target": "com.amazonaws.dynamodb#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of items evaluated, before any QueryFilter is applied. A high\n ScannedCount value with few, or no, Count results\n indicates an inefficient Query operation. For more information, see Count and\n ScannedCount in the Amazon DynamoDB Developer\n Guide.

\n

If you did not use a filter in the request, then ScannedCount is the same\n as Count.

" + "smithy.api#documentation": "

The number of items evaluated, before any QueryFilter is applied. A high\n ScannedCount value with few, or no, Count results\n indicates an inefficient Query operation. For more information, see Count and\n ScannedCount in the Amazon DynamoDB Developer\n Guide.

\n

If you did not use a filter in the request, then ScannedCount is the same\n as Count.

" } }, "LastEvaluatedKey": { @@ -8940,7 +8940,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "

The capacity units consumed by the Query operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

The capacity units consumed by the Query operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon\n DynamoDB Developer Guide.

" } } }, @@ -9824,7 +9824,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Restores the specified table to the specified point in time within\n EarliestRestorableDateTime and LatestRestorableDateTime.\n You can restore your table to any point in time during the last 35 days. Any number of\n users can execute up to 50 concurrent restores (any type of restore) in a given account.

\n

When you restore using point in time recovery, DynamoDB restores your table data to\n the state based on the selected date and time (day:hour:minute:second) to a new table.

\n

Along with data, the following are also included on the new restored table using\n point in time recovery:

\n
    \n
  • \n

    Global secondary indexes (GSIs)

    \n
  • \n
  • \n

    Local secondary indexes (LSIs)

    \n
  • \n
  • \n

    Provisioned read and write capacity

    \n
  • \n
  • \n

    Encryption settings

    \n \n

    All these settings come from the current settings of the source table at\n the time of restore.

    \n
    \n
  • \n
\n

You must manually set up the following on the restored table:

\n
    \n
  • \n

    Auto scaling policies

    \n
  • \n
  • \n

    IAM policies

    \n
  • \n
  • \n

    Amazon CloudWatch metrics and alarms

    \n
  • \n
  • \n

    Tags

    \n
  • \n
  • \n

    Stream settings

    \n
  • \n
  • \n

    Time to Live (TTL) settings

    \n
  • \n
  • \n

    Point in time recovery settings

    \n
  • \n
" + "smithy.api#documentation": "

Restores the specified table to the specified point in time within\n EarliestRestorableDateTime and LatestRestorableDateTime.\n You can restore your table to any point in time during the last 35 days. Any number of\n users can execute up to 50 concurrent restores (any type of restore) in a given account.

\n

When you restore using point in time recovery, DynamoDB restores your table data to\n the state based on the selected date and time (day:hour:minute:second) to a new table.

\n

Along with data, the following are also included on the new restored table using point\n in time recovery:

\n
    \n
  • \n

    Global secondary indexes (GSIs)

    \n
  • \n
  • \n

    Local secondary indexes (LSIs)

    \n
  • \n
  • \n

    Provisioned read and write capacity

    \n
  • \n
  • \n

    Encryption settings

    \n \n

    All these settings come from the current settings of the source table at\n the time of restore.

    \n
    \n
  • \n
\n

You must manually set up the following on the restored table:

\n
    \n
  • \n

    Auto scaling policies

    \n
  • \n
  • \n

    IAM policies

    \n
  • \n
  • \n

    Amazon CloudWatch metrics and alarms

    \n
  • \n
  • \n

    Tags

    \n
  • \n
  • \n

    Stream settings

    \n
  • \n
  • \n

    Time to Live (TTL) settings

    \n
  • \n
  • \n

    Point in time recovery settings

    \n
  • \n
" } }, "com.amazonaws.dynamodb#RestoreTableToPointInTimeInput": { @@ -10251,7 +10251,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

The Scan operation returns one or more items and item attributes by\n accessing every item in a table or a secondary index. To have DynamoDB return fewer\n items, you can provide a FilterExpression operation.

\n

If the total size of scanned items exceeds the maximum dataset size limit of 1 MB,\n the scan completes and results are returned to the user. The LastEvaluatedKey \n value is also returned and the requestor can use the LastEvaluatedKey to continue \n the scan in a subsequent operation. Each scan response also includes number of items that were \n scanned (ScannedCount) as part of the request. If using a FilterExpression, a scan result \n can result in no items meeting the criteria and the Count will result in zero. If \n you did not use a FilterExpression in the scan request, then Count is \n the same as ScannedCount.

\n \n

\n Count and ScannedCount only return the count of items specific to a \n single scan request and, unless the table is less than 1MB, do not represent the total number \n of items in the table.\n

\n
\n

A single Scan operation first reads up to the maximum number of items set (if\n using the Limit parameter) or a maximum of 1 MB of data and then applies any\n filtering to the results if a FilterExpression is provided. If\n LastEvaluatedKey is present in the response, pagination is required to complete the\n full table scan. For more information, see Paginating the\n Results in the Amazon DynamoDB Developer Guide.

\n

\n Scan operations proceed sequentially; however, for faster performance on\n a large table or secondary index, applications can request a parallel Scan\n operation by providing the Segment and TotalSegments\n parameters. For more information, see Parallel\n Scan in the Amazon DynamoDB Developer Guide.

\n

By default, a Scan uses eventually consistent reads when accessing the items in a table. \n Therefore, the results from an eventually consistent Scan may not include the latest item \n changes at the time the scan iterates through each item in the table. If you require a strongly consistent \n read of each item as the scan iterates through the items in the table, you can set the ConsistentRead \n parameter to true. Strong consistency only relates to the consistency of the read at the item level.

\n \n

\n DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead \n parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan \n see a consistent snapshot of the table when the scan operation was requested.\n

\n
", + "smithy.api#documentation": "

The Scan operation returns one or more items and item attributes by\n accessing every item in a table or a secondary index. To have DynamoDB return fewer\n items, you can provide a FilterExpression operation.

\n

If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the\n scan completes and results are returned to the user. The LastEvaluatedKey\n value is also returned and the requestor can use the LastEvaluatedKey to\n continue the scan in a subsequent operation. Each scan response also includes number of\n items that were scanned (ScannedCount) as part of the request. If using a\n FilterExpression, a scan result can result in no items meeting the\n criteria and the Count will result in zero. If you did not use a\n FilterExpression in the scan request, then Count is the\n same as ScannedCount.

\n \n

\n Count and ScannedCount only return the count of items\n specific to a single scan request and, unless the table is less than 1MB, do not\n represent the total number of items in the table.

\n
\n

A single Scan operation first reads up to the maximum number of items set\n (if using the Limit parameter) or a maximum of 1 MB of data and then\n applies any filtering to the results if a FilterExpression is provided. If\n LastEvaluatedKey is present in the response, pagination is required to\n complete the full table scan. For more information, see Paginating the\n Results in the Amazon DynamoDB Developer Guide.

\n

\n Scan operations proceed sequentially; however, for faster performance on\n a large table or secondary index, applications can request a parallel Scan\n operation by providing the Segment and TotalSegments\n parameters. For more information, see Parallel\n Scan in the Amazon DynamoDB Developer Guide.

\n

By default, a Scan uses eventually consistent reads when accessing the\n items in a table. Therefore, the results from an eventually consistent Scan\n may not include the latest item changes at the time the scan iterates through each item\n in the table. If you require a strongly consistent read of each item as the scan\n iterates through the items in the table, you can set the ConsistentRead\n parameter to true. Strong consistency only relates to the consistency of the read at the\n item level.

\n \n

DynamoDB does not provide snapshot isolation for a scan operation when the\n ConsistentRead parameter is set to true. Thus, a DynamoDB scan\n operation does not guarantee that all reads in a scan see a consistent snapshot of\n the table when the scan operation was requested.

\n
", "smithy.api#examples": [ { "title": "To scan a table", @@ -10312,7 +10312,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table containing the requested items or if you provide\n IndexName, the name of the table to which that index belongs.

\n

You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table containing the requested items or if you provide\n IndexName, the name of the table to which that index belongs.

\n

You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", "smithy.api#required": {} } }, @@ -10337,7 +10337,7 @@ "Select": { "target": "com.amazonaws.dynamodb#Select", "traits": { - "smithy.api#documentation": "

The attributes to be returned in the result. You can retrieve all item attributes,\n specific item attributes, the count of matching items, or in the case of an index, some\n or all of the attributes projected into the index.

\n
    \n
  • \n

    \n ALL_ATTRIBUTES - Returns all of the item attributes from the\n specified table or index. If you query a local secondary index, then for each\n matching item in the index, DynamoDB fetches the entire item from the parent\n table. If the index is configured to project all item attributes, then all of\n the data can be obtained from the local secondary index, and no fetching is\n required.

    \n
  • \n
  • \n

    \n ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index.\n Retrieves all attributes that have been projected into the index. If the index\n is configured to project all attributes, this return value is equivalent to\n specifying ALL_ATTRIBUTES.

    \n
  • \n
  • \n

    \n COUNT - Returns the number of matching items, rather than the\n matching items themselves. Note that this uses the same quantity of read capacity units \n as getting the items, and is subject to the same item size calculations.

    \n
  • \n
  • \n

    \n SPECIFIC_ATTRIBUTES - Returns only the attributes listed in\n ProjectionExpression. This return value is equivalent to\n specifying ProjectionExpression without specifying any value for\n Select.

    \n

    If you query or scan a local secondary index and request only attributes that\n are projected into that index, the operation reads only the index and not the\n table. If any of the requested attributes are not projected into the local\n secondary index, DynamoDB fetches each of these attributes from the parent\n table. This extra fetching incurs additional throughput cost and latency.

    \n

    If you query or scan a global secondary index, you can only request attributes\n that are projected into the index. Global secondary index queries cannot fetch\n attributes from the parent table.

    \n
  • \n
\n

If neither Select nor ProjectionExpression are specified,\n DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and\n ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both\n Select and ProjectionExpression together in a single\n request, unless the value for Select is SPECIFIC_ATTRIBUTES.\n (This usage is equivalent to specifying ProjectionExpression without any\n value for Select.)

\n \n

If you use the ProjectionExpression parameter, then the value for\n Select can only be SPECIFIC_ATTRIBUTES. Any other\n value for Select will return an error.

\n
" + "smithy.api#documentation": "

The attributes to be returned in the result. You can retrieve all item attributes,\n specific item attributes, the count of matching items, or in the case of an index, some\n or all of the attributes projected into the index.

\n
    \n
  • \n

    \n ALL_ATTRIBUTES - Returns all of the item attributes from the\n specified table or index. If you query a local secondary index, then for each\n matching item in the index, DynamoDB fetches the entire item from the parent\n table. If the index is configured to project all item attributes, then all of\n the data can be obtained from the local secondary index, and no fetching is\n required.

    \n
  • \n
  • \n

    \n ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index.\n Retrieves all attributes that have been projected into the index. If the index\n is configured to project all attributes, this return value is equivalent to\n specifying ALL_ATTRIBUTES.

    \n
  • \n
  • \n

    \n COUNT - Returns the number of matching items, rather than the\n matching items themselves. Note that this uses the same quantity of read\n capacity units as getting the items, and is subject to the same item size\n calculations.

    \n
  • \n
  • \n

    \n SPECIFIC_ATTRIBUTES - Returns only the attributes listed in\n ProjectionExpression. This return value is equivalent to\n specifying ProjectionExpression without specifying any value for\n Select.

    \n

    If you query or scan a local secondary index and request only attributes that\n are projected into that index, the operation reads only the index and not the\n table. If any of the requested attributes are not projected into the local\n secondary index, DynamoDB fetches each of these attributes from the parent\n table. This extra fetching incurs additional throughput cost and latency.

    \n

    If you query or scan a global secondary index, you can only request attributes\n that are projected into the index. Global secondary index queries cannot fetch\n attributes from the parent table.

    \n
  • \n
\n

If neither Select nor ProjectionExpression are specified,\n DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and\n ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both\n Select and ProjectionExpression together in a single\n request, unless the value for Select is SPECIFIC_ATTRIBUTES.\n (This usage is equivalent to specifying ProjectionExpression without any\n value for Select.)

\n \n

If you use the ProjectionExpression parameter, then the value for\n Select can only be SPECIFIC_ATTRIBUTES. Any other\n value for Select will return an error.

\n
" } }, "ScanFilter": { @@ -10382,7 +10382,7 @@ "FilterExpression": { "target": "com.amazonaws.dynamodb#ConditionExpression", "traits": { - "smithy.api#documentation": "

A string that contains conditions that DynamoDB applies after the Scan\n operation, but before the data is returned to you. Items that do not satisfy the\n FilterExpression criteria are not returned.

\n \n

A FilterExpression is applied after the items have already been read;\n the process of filtering does not consume any additional read capacity units.

\n
\n

For more information, see Filter Expressions in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

A string that contains conditions that DynamoDB applies after the Scan\n operation, but before the data is returned to you. Items that do not satisfy the\n FilterExpression criteria are not returned.

\n \n

A FilterExpression is applied after the items have already been read;\n the process of filtering does not consume any additional read capacity units.

\n
\n

For more information, see Filter\n Expressions in the Amazon DynamoDB Developer\n Guide.

" } }, "ExpressionAttributeNames": { @@ -10441,7 +10441,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "

The capacity units consumed by the Scan operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see \n Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide.

" + "smithy.api#documentation": "

The capacity units consumed by the Scan operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon\n DynamoDB Developer Guide.

" } } }, @@ -11688,7 +11688,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this\n parameter.

", "smithy.api#required": {} } }, @@ -11744,7 +11744,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this\n parameter.

", "smithy.api#required": {} } }, @@ -11854,7 +11854,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Adds or removes replicas in the specified global table. The global table must already\n exist to be able to use this operation. Any replica to be added must be empty, have the\n same name as the global table, have the same key schema, have DynamoDB Streams enabled,\n and have the same provisioned and maximum write capacity units.

\n \n

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

\n

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

\n
\n \n

\n For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). If you are using global tables Version\n 2019.11.21 you can use UpdateTable instead.\n

\n

\n Although you can use UpdateGlobalTable to add replicas and remove\n replicas in a single request, for simplicity we recommend that you issue separate\n requests for adding or removing replicas.\n

\n
\n

If global secondary indexes are specified, then the following conditions must also be\n met:

\n
    \n
  • \n

    The global secondary indexes must have the same name.

    \n
  • \n
  • \n

    The global secondary indexes must have the same hash key and sort key (if\n present).

    \n
  • \n
  • \n

    The global secondary indexes must have the same provisioned and maximum write\n capacity units.

    \n
  • \n
" + "smithy.api#documentation": "

Adds or removes replicas in the specified global table. The global table must already\n exist to be able to use this operation. Any replica to be added must be empty, have the\n same name as the global table, have the same key schema, have DynamoDB Streams enabled,\n and have the same provisioned and maximum write capacity units.

\n \n

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

\n

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

\n
\n \n

For global tables, this operation only applies to global tables using Version\n 2019.11.21 (Current version). If you are using global tables Version\n 2019.11.21 you can use UpdateTable instead.

\n

Although you can use UpdateGlobalTable to add replicas and remove\n replicas in a single request, for simplicity we recommend that you issue separate\n requests for adding or removing replicas.

\n
\n

If global secondary indexes are specified, then the following conditions must also be\n met:

\n
    \n
  • \n

    The global secondary indexes must have the same name.

    \n
  • \n
  • \n

    The global secondary indexes must have the same hash key and sort key (if\n present).

    \n
  • \n
  • \n

    The global secondary indexes must have the same provisioned and maximum write\n capacity units.

    \n
  • \n
" } }, "com.amazonaws.dynamodb#UpdateGlobalTableInput": { @@ -12090,7 +12090,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table containing the item to update. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table containing the item to update. You can also provide the\n Amazon Resource Name (ARN) of the table in this parameter.

", "smithy.api#required": {} } }, @@ -12122,7 +12122,7 @@ "ReturnValues": { "target": "com.amazonaws.dynamodb#ReturnValue", "traits": { - "smithy.api#documentation": "

Use ReturnValues if you want to get the item attributes as they appear\n before or after they are successfully updated. For UpdateItem, the valid values\n are:

\n
    \n
  • \n

    \n NONE - If ReturnValues is not specified, or if its\n value is NONE, then nothing is returned. (This setting is the\n default for ReturnValues.)

    \n
  • \n
  • \n

    \n ALL_OLD - Returns all of the attributes of the item, as they\n appeared before the UpdateItem operation.

    \n
  • \n
  • \n

    \n UPDATED_OLD - Returns only the updated attributes, as they appeared\n before the UpdateItem operation.

    \n
  • \n
  • \n

    \n ALL_NEW - Returns all of the attributes of the item, as they appear\n after the UpdateItem operation.

    \n
  • \n
  • \n

    \n UPDATED_NEW - Returns only the updated attributes, as they appear\n after the UpdateItem operation.

    \n
  • \n
\n

There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.

\n

The values returned are strongly consistent.

" + "smithy.api#documentation": "

Use ReturnValues if you want to get the item attributes as they appear\n before or after they are successfully updated. For UpdateItem, the valid\n values are:

\n
    \n
  • \n

    \n NONE - If ReturnValues is not specified, or if its\n value is NONE, then nothing is returned. (This setting is the\n default for ReturnValues.)

    \n
  • \n
  • \n

    \n ALL_OLD - Returns all of the attributes of the item, as they\n appeared before the UpdateItem operation.

    \n
  • \n
  • \n

    \n UPDATED_OLD - Returns only the updated attributes, as they appeared\n before the UpdateItem operation.

    \n
  • \n
  • \n

    \n ALL_NEW - Returns all of the attributes of the item, as they appear\n after the UpdateItem operation.

    \n
  • \n
  • \n

    \n UPDATED_NEW - Returns only the updated attributes, as they appear\n after the UpdateItem operation.

    \n
  • \n
\n

There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.

\n

The values returned are strongly consistent.

" } }, "ReturnConsumedCapacity": { @@ -12161,7 +12161,7 @@ "ReturnValuesOnConditionCheckFailure": { "target": "com.amazonaws.dynamodb#ReturnValuesOnConditionCheckFailure", "traits": { - "smithy.api#documentation": "

An optional parameter that returns the item attributes for an UpdateItem operation that failed a\n condition check.

\n

There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.

" + "smithy.api#documentation": "

An optional parameter that returns the item attributes for an UpdateItem\n operation that failed a condition check.

\n

There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.

" } } }, @@ -12176,13 +12176,13 @@ "Attributes": { "target": "com.amazonaws.dynamodb#AttributeMap", "traits": { - "smithy.api#documentation": "

A map of attribute values as they appear before or after the UpdateItem\n operation, as determined by the ReturnValues parameter.

\n

The Attributes map is only present if the update was successful and ReturnValues was\n specified as something other than NONE in the request. Each element\n represents one attribute.

" + "smithy.api#documentation": "

A map of attribute values as they appear before or after the UpdateItem\n operation, as determined by the ReturnValues parameter.

\n

The Attributes map is only present if the update was successful and\n ReturnValues was specified as something other than NONE in\n the request. Each element represents one attribute.

" } }, "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "

The capacity units consumed by the UpdateItem operation. The data\n returned includes the total provisioned throughput consumed, along with statistics for\n the table and any indexes involved in the operation. ConsumedCapacity is\n only returned if the ReturnConsumedCapacity parameter was specified. For\n more information, see Capacity unity consumption for write operations in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

The capacity units consumed by the UpdateItem operation. The data\n returned includes the total provisioned throughput consumed, along with statistics for\n the table and any indexes involved in the operation. ConsumedCapacity is\n only returned if the ReturnConsumedCapacity parameter was specified. For\n more information, see Capacity unity consumption for write operations in the Amazon\n DynamoDB Developer Guide.

" } }, "ItemCollectionMetrics": { @@ -12249,7 +12249,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The table name for the Kinesis streaming destination input. You can also provide the ARN of the table in this parameter.

", + "smithy.api#documentation": "

The table name for the Kinesis streaming destination input. You can also provide the\n ARN of the table in this parameter.

", "smithy.api#required": {} } }, @@ -12377,7 +12377,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB\n Streams settings for a given table.

\n \n

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n

\n
\n

You can only perform one of the following operations at once:

\n
    \n
  • \n

    Modify the provisioned throughput settings of the table.

    \n
  • \n
  • \n

    Remove a global secondary index from the table.

    \n
  • \n
  • \n

    Create a new global secondary index on the table. After the index begins\n backfilling, you can use UpdateTable to perform other\n operations.

    \n
  • \n
\n

\n UpdateTable is an asynchronous operation; while it's executing, the table\n status changes from ACTIVE to UPDATING. While it's\n UPDATING, you can't issue another UpdateTable request.\n When the table returns to the ACTIVE state, the UpdateTable\n operation is complete.

" + "smithy.api#documentation": "

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB\n Streams settings for a given table.

\n \n

For global tables, this operation only applies to global tables using Version\n 2019.11.21 (Current version).

\n
\n

You can only perform one of the following operations at once:

\n
    \n
  • \n

    Modify the provisioned throughput settings of the table.

    \n
  • \n
  • \n

    Remove a global secondary index from the table.

    \n
  • \n
  • \n

    Create a new global secondary index on the table. After the index begins\n backfilling, you can use UpdateTable to perform other\n operations.

    \n
  • \n
\n

\n UpdateTable is an asynchronous operation; while it's executing, the table\n status changes from ACTIVE to UPDATING. While it's\n UPDATING, you can't issue another UpdateTable request.\n When the table returns to the ACTIVE state, the UpdateTable\n operation is complete.

" } }, "com.amazonaws.dynamodb#UpdateTableInput": { @@ -12392,7 +12392,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table to be updated. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table to be updated. You can also provide the Amazon Resource Name (ARN) of the table\n in this parameter.

", "smithy.api#required": {} } }, @@ -12429,7 +12429,7 @@ "ReplicaUpdates": { "target": "com.amazonaws.dynamodb#ReplicationGroupUpdateList", "traits": { - "smithy.api#documentation": "

A list of replica update actions (create, delete, or update) for the table.

\n \n

For global tables, this property only applies to global tables using Version 2019.11.21 (Current version). \n

\n
" + "smithy.api#documentation": "

A list of replica update actions (create, delete, or update) for the table.

\n \n

For global tables, this property only applies to global tables using Version\n 2019.11.21 (Current version).

\n
" } }, "TableClass": { @@ -12441,13 +12441,13 @@ "DeletionProtectionEnabled": { "target": "com.amazonaws.dynamodb#DeletionProtectionEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table.

" + "smithy.api#documentation": "

Indicates whether deletion protection is to be enabled (true) or disabled (false) on\n the table.

" } }, "OnDemandThroughput": { "target": "com.amazonaws.dynamodb#OnDemandThroughput", "traits": { - "smithy.api#documentation": "

Updates the maximum number of read and write units for the specified table in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" + "smithy.api#documentation": "

Updates the maximum number of read and write units for the specified table in\n on-demand capacity mode. If you use this parameter, you must specify\n MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" } } }, @@ -12494,7 +12494,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates auto scaling settings on your global tables at once.

\n \n

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n

\n
" + "smithy.api#documentation": "

Updates auto scaling settings on your global tables at once.

\n \n

For global tables, this operation only applies to global tables using Version\n 2019.11.21 (Current version).

\n
" } }, "com.amazonaws.dynamodb#UpdateTableReplicaAutoScalingInput": { @@ -12509,7 +12509,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the global table to be updated. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the global table to be updated. You can also provide the Amazon Resource Name (ARN) of the\n table in this parameter.

", "smithy.api#required": {} } }, @@ -12579,7 +12579,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table to be configured. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table to be configured. You can also provide the Amazon Resource Name (ARN) of the\n table in this parameter.

", "smithy.api#required": {} } }, diff --git a/models/ec2.json b/models/ec2.json index 20f2ebe277..7e0de0da2b 100644 --- a/models/ec2.json +++ b/models/ec2.json @@ -2303,6 +2303,9 @@ { "target": "com.amazonaws.ec2#CreateCapacityReservation" }, + { + "target": "com.amazonaws.ec2#CreateCapacityReservationBySplitting" + }, { "target": "com.amazonaws.ec2#CreateCapacityReservationFleet" }, @@ -3809,6 +3812,9 @@ { "target": "com.amazonaws.ec2#MoveByoipCidrToIpam" }, + { + "target": "com.amazonaws.ec2#MoveCapacityReservationInstances" + }, { "target": "com.amazonaws.ec2#ProvisionByoipCidr" }, @@ -5353,7 +5359,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "DestinationPrefixListId", - "smithy.api#documentation": "

The prefix of the Amazon Web Service.

", + "smithy.api#documentation": "

The prefix of the Amazon Web Services service.

", "smithy.api#xmlName": "destinationPrefixListId" } }, @@ -13908,6 +13914,94 @@ "smithy.api#documentation": "

Creates a new Capacity Reservation with the specified attributes.

\n

Capacity Reservations enable you to reserve capacity for your Amazon EC2 instances in a specific Availability Zone for any duration. This \n\t\t\tgives you the flexibility to selectively add capacity reservations and still get the Regional RI discounts for that usage. \n\t\t\tBy creating Capacity Reservations, you ensure that you always have access to Amazon EC2 capacity when you need it, for as long as you need it. \n\t\t\tFor more information, see Capacity Reservations in the Amazon EC2 User Guide.

\n

Your request to create a Capacity Reservation could fail if Amazon EC2 does not have sufficient capacity to\n\t\t\tfulfill the request. If your request fails due to Amazon EC2 capacity constraints, either try\n\t\t\tagain at a later time, try in a different Availability Zone, or request a smaller\n\t\t\tcapacity reservation. If your application is flexible across instance types and sizes,\n\t\t\ttry to create a Capacity Reservation with different instance attributes.

\n

Your request could also fail if the requested quantity exceeds your On-Demand Instance\n\t\t\tlimit for the selected instance type. If your request fails due to limit constraints,\n\t\t\tincrease your On-Demand Instance limit for the required instance type and try again. For\n\t\t\tmore information about increasing your instance limits, see Amazon EC2 Service\n\t\t\t\tQuotas in the Amazon EC2 User Guide.

" } }, + "com.amazonaws.ec2#CreateCapacityReservationBySplitting": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#CreateCapacityReservationBySplittingRequest" + }, + "output": { + "target": "com.amazonaws.ec2#CreateCapacityReservationBySplittingResult" + }, + "traits": { + "smithy.api#documentation": "

\n\t\t\tCreate a new Capacity Reservation by splitting the available capacity of the source Capacity Reservation. The new Capacity Reservation will have the same attributes as the source Capacity Reservation except for tags. The source Capacity Reservation must be active and owned by your Amazon Web Services account.\n\t\t

" + } + }, + "com.amazonaws.ec2#CreateCapacityReservationBySplittingRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + }, + "ClientToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.

", + "smithy.api#idempotencyToken": {} + } + }, + "SourceCapacityReservationId": { + "target": "com.amazonaws.ec2#CapacityReservationId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

\n\t\t\tThe ID of the Capacity Reservation from which you want to split the available capacity. \n\t\t

", + "smithy.api#required": {} + } + }, + "InstanceCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

\n\t\t\tThe number of instances to split from the source Capacity Reservation.\n\t\t

", + "smithy.api#required": {} + } + }, + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "

\n\t\t\tThe tags to apply to the new Capacity Reservation.\n\t\t

", + "smithy.api#xmlName": "TagSpecification" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#CreateCapacityReservationBySplittingResult": { + "type": "structure", + "members": { + "SourceCapacityReservation": { + "target": "com.amazonaws.ec2#CapacityReservation", + "traits": { + "aws.protocols#ec2QueryName": "SourceCapacityReservation", + "smithy.api#documentation": "

\n\t\t\tInformation about the source Capacity Reservation.\n\t\t

", + "smithy.api#xmlName": "sourceCapacityReservation" + } + }, + "DestinationCapacityReservation": { + "target": "com.amazonaws.ec2#CapacityReservation", + "traits": { + "aws.protocols#ec2QueryName": "DestinationCapacityReservation", + "smithy.api#documentation": "

\n\t\t\tInformation about the destination Capacity Reservation.\n\t\t

", + "smithy.api#xmlName": "destinationCapacityReservation" + } + }, + "InstanceCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "InstanceCount", + "smithy.api#documentation": "

\n\t\t\tThe number of instances in the new Capacity Reservation. The number of instances in the source Capacity Reservation was reduced by this amount.\n\t\t

", + "smithy.api#xmlName": "instanceCount" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#CreateCapacityReservationFleet": { "type": "operation", "input": { @@ -16156,6 +16250,12 @@ "traits": { "smithy.api#documentation": "

IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab.

" } + }, + "EnablePrivateGua": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default.

" + } } }, "traits": { @@ -17603,7 +17703,7 @@ "AwsService": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Service. Currently not supported.

" + "smithy.api#documentation": "

The Amazon Web Services service. Currently not supported.

" } }, "Permission": { @@ -27125,7 +27225,7 @@ "target": "com.amazonaws.ec2#DescribeAddressTransfersResult" }, "traits": { - "smithy.api#documentation": "

Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.

\n

When you transfer an Elastic IP address, there is a two-step handshake\n between the source and transfer Amazon Web Services accounts. When the source account starts the transfer,\n the transfer account has seven days to accept the Elastic IP address\n transfer. During those seven days, the source account can view the\n pending transfer by using this action. After seven days, the\n transfer expires and ownership of the Elastic IP\n address returns to the source\n account. Accepted transfers are visible to the source account for three days\n after the transfers have been accepted.

", + "smithy.api#documentation": "

Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.

\n

When you transfer an Elastic IP address, there is a two-step handshake\n between the source and transfer Amazon Web Services accounts. When the source account starts the transfer,\n the transfer account has seven days to accept the Elastic IP address\n transfer. During those seven days, the source account can view the\n pending transfer by using this action. After seven days, the\n transfer expires and ownership of the Elastic IP\n address returns to the source\n account. Accepted transfers are visible to the source account for 14 days\n after the transfers have been accepted.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -35518,7 +35618,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

One or more filters.

\n
    \n
  • \n

    \n network-interface-permission.network-interface-permission-id - The ID of the\n\t\t\t\tpermission.

    \n
  • \n
  • \n

    \n network-interface-permission.network-interface-id - The ID of\n\t\t\t\t\tthe network interface.

    \n
  • \n
  • \n

    \n network-interface-permission.aws-account-id - The Amazon Web Services account ID.

    \n
  • \n
  • \n

    \n network-interface-permission.aws-service - The Amazon Web Service.

    \n
  • \n
  • \n

    \n network-interface-permission.permission - The type of\n\t\t\t\t\tpermission (INSTANCE-ATTACH |\n\t\t\t\t\tEIP-ASSOCIATE).

    \n
  • \n
", + "smithy.api#documentation": "

One or more filters.

\n
    \n
  • \n

    \n network-interface-permission.network-interface-permission-id - The ID of the\n\t\t\t\tpermission.

    \n
  • \n
  • \n

    \n network-interface-permission.network-interface-id - The ID of\n\t\t\t\t\tthe network interface.

    \n
  • \n
  • \n

    \n network-interface-permission.aws-account-id - The Amazon Web Services account ID.

    \n
  • \n
  • \n

    \n network-interface-permission.aws-service - The Amazon Web Services service.

    \n
  • \n
  • \n

    \n network-interface-permission.permission - The type of\n\t\t\t\t\tpermission (INSTANCE-ATTACH |\n\t\t\t\t\tEIP-ASSOCIATE).

    \n
  • \n
", "smithy.api#xmlName": "Filter" } }, @@ -35689,7 +35789,7 @@ "target": "com.amazonaws.ec2#FilterList", "traits": { "aws.protocols#ec2QueryName": "Filter", - "smithy.api#documentation": "

One or more filters.

\n
    \n
  • \n

    \n association.allocation-id - The allocation ID returned when you\n\t\t allocated the Elastic IP address (IPv4) for your network interface.

    \n
  • \n
  • \n

    \n association.association-id - The association ID returned when the\n\t\t network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.association.public-ip - The association ID returned when\n\t\t the network interface was associated with the Elastic IP address\n\t\t (IPv4).

    \n
  • \n
  • \n

    \n addresses.primary - Whether the private IPv4 address is the primary\n IP address associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.private-ip-address - The private IPv4 addresses\n\t\t associated with the network interface.

    \n
  • \n
  • \n

    \n association.ip-owner-id - The owner of the Elastic IP address\n (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n association.public-ip - The address of the Elastic IP address\n (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n association.public-dns-name - The public DNS name for the network\n interface (IPv4).

    \n
  • \n
  • \n

    \n attachment.attach-time - The time that the network interface was attached to an instance.

    \n
  • \n
  • \n

    \n attachment.attachment-id - The ID of the interface attachment.

    \n
  • \n
  • \n

    \n attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n attachment.device-index - The device index to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-id - The ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the network interface.

    \n
  • \n
  • \n

    \n description - The description of the network interface.

    \n
  • \n
  • \n

    \n group-id - The ID of a security group associated with the network interface.

    \n
  • \n
  • \n

    \n ipv6-addresses.ipv6-address - An IPv6 address associated with\n the network interface.

    \n
  • \n
  • \n

    \n interface-type - The type of network interface (api_gateway_managed | \n\t\t aws_codestar_connections_managed | branch | \n\t\t ec2_instance_connect_endpoint | efa | efs | \n\t\t gateway_load_balancer | gateway_load_balancer_endpoint | \n\t\t global_accelerator_managed | \n\t\t interface | iot_rules_managed | \n\t\t lambda | load_balancer | \n\t\t nat_gateway | network_load_balancer | \n\t\t quicksight | \n\t\t transit_gateway | trunk | \n\t\t vpc_endpoint).

    \n
  • \n
  • \n

    \n mac-address - The MAC address of the network interface.

    \n
  • \n
  • \n

    \n network-interface-id - The ID of the network interface.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the network interface owner.

    \n
  • \n
  • \n

    \n private-dns-name - The private DNS name of the network interface (IPv4).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address or addresses of the\n network interface.

    \n
  • \n
  • \n

    \n requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

    \n
  • \n
  • \n

    \n requester-managed - Indicates whether the network interface is being managed by an Amazon Web Service \n\t\t (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the network interface performs source/destination checking. \n\t\t A value of true means checking is enabled, and false means checking is disabled. \n\t\t The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n status - The status of the network interface. If the network interface is not attached to an instance, the status is available; \n\t\t if a network interface is attached to an instance the status is in-use.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the network interface.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC for the network interface.

    \n
  • \n
", + "smithy.api#documentation": "

One or more filters.

\n
    \n
  • \n

    \n association.allocation-id - The allocation ID returned when you\n\t\t allocated the Elastic IP address (IPv4) for your network interface.

    \n
  • \n
  • \n

    \n association.association-id - The association ID returned when the\n\t\t network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.association.public-ip - The association ID returned when\n\t\t the network interface was associated with the Elastic IP address\n\t\t (IPv4).

    \n
  • \n
  • \n

    \n addresses.primary - Whether the private IPv4 address is the primary\n IP address associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.private-ip-address - The private IPv4 addresses\n\t\t associated with the network interface.

    \n
  • \n
  • \n

    \n association.ip-owner-id - The owner of the Elastic IP address\n (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n association.public-ip - The address of the Elastic IP address\n (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n association.public-dns-name - The public DNS name for the network\n interface (IPv4).

    \n
  • \n
  • \n

    \n attachment.attach-time - The time that the network interface was attached to an instance.

    \n
  • \n
  • \n

    \n attachment.attachment-id - The ID of the interface attachment.

    \n
  • \n
  • \n

    \n attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n attachment.device-index - The device index to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-id - The ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the network interface.

    \n
  • \n
  • \n

    \n description - The description of the network interface.

    \n
  • \n
  • \n

    \n group-id - The ID of a security group associated with the network interface.

    \n
  • \n
  • \n

    \n ipv6-addresses.ipv6-address - An IPv6 address associated with\n the network interface.

    \n
  • \n
  • \n

    \n interface-type - The type of network interface (api_gateway_managed | \n\t\t aws_codestar_connections_managed | branch | \n\t\t ec2_instance_connect_endpoint | efa | efs | \n\t\t gateway_load_balancer | gateway_load_balancer_endpoint | \n\t\t global_accelerator_managed | \n\t\t interface | iot_rules_managed | \n\t\t lambda | load_balancer | \n\t\t nat_gateway | network_load_balancer | \n\t\t quicksight | \n\t\t transit_gateway | trunk | \n\t\t vpc_endpoint).

    \n
  • \n
  • \n

    \n mac-address - The MAC address of the network interface.

    \n
  • \n
  • \n

    \n network-interface-id - The ID of the network interface.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the network interface owner.

    \n
  • \n
  • \n

    \n private-dns-name - The private DNS name of the network interface (IPv4).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address or addresses of the\n network interface.

    \n
  • \n
  • \n

    \n requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

    \n
  • \n
  • \n

    \n requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services service \n\t\t (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the network interface performs source/destination checking. \n\t\t A value of true means checking is enabled, and false means checking is disabled. \n\t\t The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n status - The status of the network interface. If the network interface is not attached to an instance, the status is available; \n\t\t if a network interface is attached to an instance the status is in-use.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the network interface.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC for the network interface.

    \n
  • \n
", "smithy.api#xmlName": "filter" } }, @@ -36710,7 +36810,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n association.gateway-id - The ID of the gateway involved in the\n\t\t association.

    \n
  • \n
  • \n

    \n association.route-table-association-id - The ID of an association\n ID for the route table.

    \n
  • \n
  • \n

    \n association.route-table-id - The ID of the route table involved in\n the association.

    \n
  • \n
  • \n

    \n association.subnet-id - The ID of the subnet involved in the\n association.

    \n
  • \n
  • \n

    \n association.main - Indicates whether the route table is the main\n route table for the VPC (true | false). Route tables\n that do not have an association ID are not returned in the response.

    \n
  • \n
  • \n

    \n owner-id - The ID of the Amazon Web Services account that owns the route table.

    \n
  • \n
  • \n

    \n route-table-id - The ID of the route table.

    \n
  • \n
  • \n

    \n route.destination-cidr-block - The IPv4 CIDR range specified in a\n route in the table.

    \n
  • \n
  • \n

    \n route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route table.

    \n
  • \n
  • \n

    \n route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Service\n specified in a route in the table.

    \n
  • \n
  • \n

    \n route.egress-only-internet-gateway-id - The ID of an\n egress-only Internet gateway specified in a route in the route table.

    \n
  • \n
  • \n

    \n route.gateway-id - The ID of a gateway specified in a route in the table.

    \n
  • \n
  • \n

    \n route.instance-id - The ID of an instance specified in a route in the table.

    \n
  • \n
  • \n

    \n route.nat-gateway-id - The ID of a NAT gateway.

    \n
  • \n
  • \n

    \n route.transit-gateway-id - The ID of a transit gateway.

    \n
  • \n
  • \n

    \n route.origin - Describes how the route was created. \n CreateRouteTable indicates that the route was automatically\n created when the route table was created; CreateRoute indicates\n that the route was manually added to the route table;\n EnableVgwRoutePropagation indicates that the route was\n propagated by route propagation.

    \n
  • \n
  • \n

    \n route.state - The state of a route in the route table\n (active | blackhole). The blackhole state\n indicates that the route's target isn't available (for example, the specified\n gateway isn't attached to the VPC, the specified NAT instance has been\n terminated, and so on).

    \n
  • \n
  • \n

    \n route.vpc-peering-connection-id - The ID of a VPC peering\n\t\t connection specified in a route in the table.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC for the route table.

    \n
  • \n
", + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n association.gateway-id - The ID of the gateway involved in the\n\t\t association.

    \n
  • \n
  • \n

    \n association.route-table-association-id - The ID of an association\n ID for the route table.

    \n
  • \n
  • \n

    \n association.route-table-id - The ID of the route table involved in\n the association.

    \n
  • \n
  • \n

    \n association.subnet-id - The ID of the subnet involved in the\n association.

    \n
  • \n
  • \n

    \n association.main - Indicates whether the route table is the main\n route table for the VPC (true | false). Route tables\n that do not have an association ID are not returned in the response.

    \n
  • \n
  • \n

    \n owner-id - The ID of the Amazon Web Services account that owns the route table.

    \n
  • \n
  • \n

    \n route-table-id - The ID of the route table.

    \n
  • \n
  • \n

    \n route.destination-cidr-block - The IPv4 CIDR range specified in a\n route in the table.

    \n
  • \n
  • \n

    \n route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route table.

    \n
  • \n
  • \n

    \n route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Services service\n specified in a route in the table.

    \n
  • \n
  • \n

    \n route.egress-only-internet-gateway-id - The ID of an\n egress-only Internet gateway specified in a route in the route table.

    \n
  • \n
  • \n

    \n route.gateway-id - The ID of a gateway specified in a route in the table.

    \n
  • \n
  • \n

    \n route.instance-id - The ID of an instance specified in a route in the table.

    \n
  • \n
  • \n

    \n route.nat-gateway-id - The ID of a NAT gateway.

    \n
  • \n
  • \n

    \n route.transit-gateway-id - The ID of a transit gateway.

    \n
  • \n
  • \n

    \n route.origin - Describes how the route was created. \n CreateRouteTable indicates that the route was automatically\n created when the route table was created; CreateRoute indicates\n that the route was manually added to the route table;\n EnableVgwRoutePropagation indicates that the route was\n propagated by route propagation.

    \n
  • \n
  • \n

    \n route.state - The state of a route in the route table\n (active | blackhole). The blackhole state\n indicates that the route's target isn't available (for example, the specified\n gateway isn't attached to the VPC, the specified NAT instance has been\n terminated, and so on).

    \n
  • \n
  • \n

    \n route.vpc-peering-connection-id - The ID of a VPC peering\n\t\t connection specified in a route in the table.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC for the route table.

    \n
  • \n
", "smithy.api#xmlName": "Filter" } }, @@ -50177,7 +50277,7 @@ "target": "com.amazonaws.ec2#ImageId", "traits": { "aws.protocols#ec2QueryName": "ImageId", - "smithy.api#documentation": "

The ID of the AMI. An AMI is required to launch an instance. This parameter is only\n available for fleets of type instant. For fleets of type maintain\n and request, you must specify the AMI ID in the launch template.

", + "smithy.api#documentation": "

The ID of the AMI in the format ami-17characters00000.

\n

Alternatively, you can specify a Systems Manager parameter, using one of the following\n formats. The Systems Manager parameter will resolve to an AMI ID on launch.

\n

To reference a public parameter:

\n
    \n
  • \n

    \n resolve:ssm:public-parameter\n \n

    \n
  • \n
\n

To reference a parameter stored in the same account:

\n
    \n
  • \n

    \n resolve:ssm:parameter-name\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-name:version-number\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-name:label\n \n

    \n
  • \n
\n

To reference a parameter shared from another Amazon Web Services account:

\n
    \n
  • \n

    \n resolve:ssm:parameter-ARN\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-ARN:version-number\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-ARN:label\n \n

    \n
  • \n
\n

For more information, see Use a Systems Manager parameter instead of an AMI ID in the\n Amazon EC2 User Guide.

\n \n

This parameter is only available for fleets of type instant. For fleets\n of type maintain and request, you must specify the AMI ID in\n the launch template.

\n
", "smithy.api#xmlName": "imageId" } } @@ -50258,7 +50358,7 @@ "ImageId": { "target": "com.amazonaws.ec2#ImageId", "traits": { - "smithy.api#documentation": "

The ID of the AMI. An AMI is required to launch an instance. This parameter is only\n available for fleets of type instant. For fleets of type maintain\n and request, you must specify the AMI ID in the launch template.

" + "smithy.api#documentation": "

The ID of the AMI in the format ami-17characters00000.

\n

Alternatively, you can specify a Systems Manager parameter, using one of the following\n formats. The Systems Manager parameter will resolve to an AMI ID on launch.

\n

To reference a public parameter:

\n
    \n
  • \n

    \n resolve:ssm:public-parameter\n \n

    \n
  • \n
\n

To reference a parameter stored in the same account:

\n
    \n
  • \n

    \n resolve:ssm:parameter-name\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-name:version-number\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-name:label\n \n

    \n
  • \n
\n

To reference a parameter shared from another Amazon Web Services account:

\n
    \n
  • \n

    \n resolve:ssm:parameter-ARN\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-ARN:version-number\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-ARN:label\n \n

    \n
  • \n
\n

For more information, see Use a Systems Manager parameter instead of an AMI ID in the\n Amazon EC2 User Guide.

\n \n

This parameter is only available for fleets of type instant. For fleets\n of type maintain and request, you must specify the AMI ID in\n the launch template.

\n
" } } }, @@ -67226,6 +67326,29 @@ } } }, + "com.amazonaws.ec2#IpSource": { + "type": "enum", + "members": { + "amazon": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "amazon" + } + }, + "byoip": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "byoip" + } + }, + "none": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "none" + } + } + } + }, "com.amazonaws.ec2#Ipam": { "type": "structure", "members": { @@ -67356,6 +67479,14 @@ "smithy.api#documentation": "

IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab.

", "smithy.api#xmlName": "tier" } + }, + "EnablePrivateGua": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "EnablePrivateGua", + "smithy.api#documentation": "

Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default.

", + "smithy.api#xmlName": "enablePrivateGua" + } } }, "traits": { @@ -67846,6 +67977,14 @@ "smithy.api#xmlName": "resourceCidr" } }, + "IpSource": { + "target": "com.amazonaws.ec2#IpamResourceCidrIpSource", + "traits": { + "aws.protocols#ec2QueryName": "IpSource", + "smithy.api#documentation": "

The source that allocated the IP address space. byoip or amazon indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none indicates private space.

", + "smithy.api#xmlName": "ipSource" + } + }, "ResourceType": { "target": "com.amazonaws.ec2#IpamResourceType", "traits": { @@ -69233,6 +69372,29 @@ "smithy.api#documentation": "

The CIDR for an IPAM resource.

" } }, + "com.amazonaws.ec2#IpamResourceCidrIpSource": { + "type": "enum", + "members": { + "amazon": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "amazon" + } + }, + "byoip": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "byoip" + } + }, + "none": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "none" + } + } + } + }, "com.amazonaws.ec2#IpamResourceCidrSet": { "type": "list", "member": { @@ -70039,6 +70201,23 @@ "com.amazonaws.ec2#Ipv6Address": { "type": "string" }, + "com.amazonaws.ec2#Ipv6AddressAttribute": { + "type": "enum", + "members": { + "public": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "public" + } + }, + "private": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "private" + } + } + } + }, "com.amazonaws.ec2#Ipv6AddressList": { "type": "list", "member": { @@ -74821,7 +75000,7 @@ "target": "com.amazonaws.ec2#ModifyCapacityReservationResult" }, "traits": { - "smithy.api#documentation": "

Modifies a Capacity Reservation's capacity and the conditions under which it is to be released. You\n\t\t\tcannot change a Capacity Reservation's instance type, EBS optimization, instance store settings,\n\t\t\tplatform, Availability Zone, or instance eligibility. If you need to modify any of these\n\t\t\tattributes, we recommend that you cancel the Capacity Reservation, and then create a new one with\n\t\t\tthe required attributes.

" + "smithy.api#documentation": "

Modifies a Capacity Reservation's capacity, instance eligibility, and the conditions under which it is to be released. You\n\t\t can't modify a Capacity Reservation's instance type, EBS optimization, platform, instance store settings, Availability Zone, or\n\t\t tenancy. If you need to modify any of these attributes, we recommend that you cancel the Capacity Reservation, and then create a new one with\n\t\t the required attributes. For more information, see Modify an active Capacity Reservation.

" } }, "com.amazonaws.ec2#ModifyCapacityReservationFleet": { @@ -74938,6 +75117,12 @@ "traits": { "smithy.api#documentation": "

Reserved for future use.

" } + }, + "InstanceMatchCriteria": { + "target": "com.amazonaws.ec2#InstanceMatchCriteria", + "traits": { + "smithy.api#documentation": "

\n\t\t\tThe matching criteria (instance eligibility) that you want to use in the modified Capacity Reservation. If you change the instance eligibility of an existing Capacity Reservation from targeted to open, \n\t\t\tany running instances that match the attributes of the Capacity Reservation, have the CapacityReservationPreference set to open, and \n\t\t\tare not yet running in the Capacity Reservation, will automatically use the modified Capacity Reservation.\n\t\t

\n

To modify the instance eligibility, the Capacity Reservation must be completely idle (zero usage).

" + } } }, "traits": { @@ -76580,6 +76765,12 @@ "traits": { "smithy.api#documentation": "

IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab.

" } + }, + "EnablePrivateGua": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default.

" + } } }, "traits": { @@ -80390,6 +80581,95 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#MoveCapacityReservationInstances": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#MoveCapacityReservationInstancesRequest" + }, + "output": { + "target": "com.amazonaws.ec2#MoveCapacityReservationInstancesResult" + }, + "traits": { + "smithy.api#documentation": "

Move available capacity from a source Capacity Reservation to a destination Capacity Reservation. The source Capacity Reservation and the destination Capacity Reservation must be active, owned by your Amazon Web Services account, and share the following:\n\t\t

\n
    \n
  • \n

    Instance type

    \n
  • \n
  • \n

    Platform

    \n
  • \n
  • \n

    Availability Zone

    \n
  • \n
  • \n

    Tenancy

    \n
  • \n
  • \n

    Placement group

    \n
  • \n
  • \n

    Capacity Reservation end time - At specific time or Manually.

    \n
  • \n
" + } + }, + "com.amazonaws.ec2#MoveCapacityReservationInstancesRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + }, + "ClientToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.

", + "smithy.api#idempotencyToken": {} + } + }, + "SourceCapacityReservationId": { + "target": "com.amazonaws.ec2#CapacityReservationId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

\n\t\t\tThe ID of the Capacity Reservation from which you want to move capacity.\n\t\t

", + "smithy.api#required": {} + } + }, + "DestinationCapacityReservationId": { + "target": "com.amazonaws.ec2#CapacityReservationId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

\n\t\t\tThe ID of the Capacity Reservation that you want to move capacity into.\n\t\t

", + "smithy.api#required": {} + } + }, + "InstanceCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The number of instances that you want to move from the source Capacity Reservation.\n\t\t

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#MoveCapacityReservationInstancesResult": { + "type": "structure", + "members": { + "SourceCapacityReservation": { + "target": "com.amazonaws.ec2#CapacityReservation", + "traits": { + "aws.protocols#ec2QueryName": "SourceCapacityReservation", + "smithy.api#documentation": "

\n\t\t\tInformation about the source Capacity Reservation.\n\t\t

", + "smithy.api#xmlName": "sourceCapacityReservation" + } + }, + "DestinationCapacityReservation": { + "target": "com.amazonaws.ec2#CapacityReservation", + "traits": { + "aws.protocols#ec2QueryName": "DestinationCapacityReservation", + "smithy.api#documentation": "

\n\t\t\tInformation about the destination Capacity Reservation.\n\t\t

", + "smithy.api#xmlName": "destinationCapacityReservation" + } + }, + "InstanceCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "InstanceCount", + "smithy.api#documentation": "

\n\t\t\tThe number of instances that were moved from the source Capacity Reservation to the destination Capacity Reservation.\n\t\t

", + "smithy.api#xmlName": "instanceCount" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#MoveStatus": { "type": "enum", "members": { @@ -82287,7 +82567,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "AwsService", - "smithy.api#documentation": "

The Amazon Web Service.

", + "smithy.api#documentation": "

The Amazon Web Services service.

", "smithy.api#xmlName": "awsService" } }, @@ -84516,7 +84796,7 @@ "target": "com.amazonaws.ec2#ValueStringList", "traits": { "aws.protocols#ec2QueryName": "CidrSet", - "smithy.api#documentation": "

The IP address range of the Amazon Web Service.

", + "smithy.api#documentation": "

The IP address range of the Amazon Web Services service.

", "smithy.api#xmlName": "cidrSet" } }, @@ -85646,7 +85926,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The netmask length of the CIDR you would like to allocate to the public IPv4 pool.

", + "smithy.api#documentation": "

The netmask length of the CIDR you would like to allocate to the public IPv4 pool. The least specific netmask length you can define is 24.

", "smithy.api#required": {} } }, @@ -88526,7 +88806,7 @@ "ImageId": { "target": "com.amazonaws.ec2#ImageId", "traits": { - "smithy.api#documentation": "

The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which\n will resolve to an AMI ID on launch.

\n

Valid formats:

\n
    \n
  • \n

    \n ami-17characters00000\n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-name\n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-name:version-number\n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-name:label\n

    \n
  • \n
  • \n

    \n resolve:ssm:public-parameter\n

    \n
  • \n
\n \n

Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager parameter. \n If the launch template will be used by an EC2 Fleet or Spot Fleet, you must specify the AMI ID.

\n
\n

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

" + "smithy.api#documentation": "

The ID of the AMI in the format ami-17characters00000.

\n

Alternatively, you can specify a Systems Manager parameter, using one of the following\n formats. The Systems Manager parameter will resolve to an AMI ID on launch.

\n

To reference a public parameter:

\n
    \n
  • \n

    \n resolve:ssm:public-parameter\n \n

    \n
  • \n
\n

To reference a parameter stored in the same account:

\n
    \n
  • \n

    \n resolve:ssm:parameter-name\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-name:version-number\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-name:label\n \n

    \n
  • \n
\n

To reference a parameter shared from another Amazon Web Services account:

\n
    \n
  • \n

    \n resolve:ssm:parameter-ARN\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-ARN:version-number\n \n

    \n
  • \n
  • \n

    \n resolve:ssm:parameter-ARN:label\n \n

    \n
  • \n
\n

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

\n \n

If the launch template will be used for an EC2 Fleet or Spot Fleet, note the\n following:

\n
    \n
  • \n

    Only EC2 Fleets of type instant support specifying a Systems\n Manager parameter.

    \n
  • \n
  • \n

    For EC2 Fleets of type maintain or request, or\n for Spot Fleets, you must specify the AMI ID.

    \n
  • \n
\n
" } }, "InstanceType": { @@ -91091,12 +91371,6 @@ "smithy.api#enumValue": "vpc-block-public-access-exclusion" } }, - "vpc_encryption_control": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "vpc-encryption-control" - } - }, "ipam_resource_discovery": { "target": "smithy.api#Unit", "traits": { @@ -92205,7 +92479,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "DestinationPrefixListId", - "smithy.api#documentation": "

The prefix of the Amazon Web Service.

", + "smithy.api#documentation": "

The prefix of the Amazon Web Services service.

", "smithy.api#xmlName": "destinationPrefixListId" } }, @@ -98536,6 +98810,22 @@ "smithy.api#documentation": "

The state of the CIDR block.

", "smithy.api#xmlName": "ipv6CidrBlockState" } + }, + "Ipv6AddressAttribute": { + "target": "com.amazonaws.ec2#Ipv6AddressAttribute", + "traits": { + "aws.protocols#ec2QueryName": "Ipv6AddressAttribute", + "smithy.api#documentation": "

Public IPv6 addresses are those advertised on the internet from Amazon Web Services. Private IP addresses are not and cannot be advertised on the internet from Amazon Web Services.

", + "smithy.api#xmlName": "ipv6AddressAttribute" + } + }, + "IpSource": { + "target": "com.amazonaws.ec2#IpSource", + "traits": { + "aws.protocols#ec2QueryName": "IpSource", + "smithy.api#documentation": "

The source that allocated the IP address space. byoip or amazon indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none indicates private space.

", + "smithy.api#xmlName": "ipSource" + } } }, "traits": { @@ -107393,6 +107683,22 @@ "smithy.api#documentation": "

The ID of the IPv6 address pool from which the IPv6 CIDR block is allocated.

", "smithy.api#xmlName": "ipv6Pool" } + }, + "Ipv6AddressAttribute": { + "target": "com.amazonaws.ec2#Ipv6AddressAttribute", + "traits": { + "aws.protocols#ec2QueryName": "Ipv6AddressAttribute", + "smithy.api#documentation": "

Public IPv6 addresses are those advertised on the internet from Amazon Web Services. Private IP addresses are not and cannot be advertised on the internet from Amazon Web Services.

", + "smithy.api#xmlName": "ipv6AddressAttribute" + } + }, + "IpSource": { + "target": "com.amazonaws.ec2#IpSource", + "traits": { + "aws.protocols#ec2QueryName": "IpSource", + "smithy.api#documentation": "

The source that allocated the IP address space. byoip or amazon indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none indicates private space.

", + "smithy.api#xmlName": "ipSource" + } } }, "traits": { diff --git a/models/ecr.json b/models/ecr.json index 7cacc1c713..1291f58c1e 100644 --- a/models/ecr.json +++ b/models/ecr.json @@ -29,6 +29,18 @@ ] }, "shapes": { + "com.amazonaws.ecr#AccountSettingName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.ecr#AccountSettingValue": { + "type": "string" + }, "com.amazonaws.ecr#AmazonEC2ContainerRegistry_V20150921": { "type": "service", "version": "2015-09-21", @@ -54,6 +66,9 @@ { "target": "com.amazonaws.ecr#CreateRepository" }, + { + "target": "com.amazonaws.ecr#CreateRepositoryCreationTemplate" + }, { "target": "com.amazonaws.ecr#DeleteLifecyclePolicy" }, @@ -66,6 +81,9 @@ { "target": "com.amazonaws.ecr#DeleteRepository" }, + { + "target": "com.amazonaws.ecr#DeleteRepositoryCreationTemplate" + }, { "target": "com.amazonaws.ecr#DeleteRepositoryPolicy" }, @@ -87,6 +105,12 @@ { "target": "com.amazonaws.ecr#DescribeRepositories" }, + { + "target": "com.amazonaws.ecr#DescribeRepositoryCreationTemplates" + }, + { + "target": "com.amazonaws.ecr#GetAccountSetting" + }, { "target": "com.amazonaws.ecr#GetAuthorizationToken" }, @@ -117,6 +141,9 @@ { "target": "com.amazonaws.ecr#ListTagsForResource" }, + { + "target": "com.amazonaws.ecr#PutAccountSetting" + }, { "target": "com.amazonaws.ecr#PutImage" }, @@ -156,6 +183,9 @@ { "target": "com.amazonaws.ecr#UpdatePullThroughCacheRule" }, + { + "target": "com.amazonaws.ecr#UpdateRepositoryCreationTemplate" + }, { "target": "com.amazonaws.ecr#UploadLayerPart" }, @@ -1905,7 +1935,7 @@ "upstreamRegistryUrl": { "target": "com.amazonaws.ecr#Url", "traits": { - "smithy.api#documentation": "

The registry URL of the upstream public registry to use as the source for the pull\n through cache rule. The following is the syntax to use for each supported upstream\n registry.

\n
    \n
  • \n

    Amazon ECR Public (ecr-public) - public.ecr.aws\n

    \n
  • \n
  • \n

    Docker Hub (docker-hub) -\n registry-1.docker.io\n

    \n
  • \n
  • \n

    Quay (quay) - quay.io\n

    \n
  • \n
  • \n

    Kubernetes (k8s) - registry.k8s.io\n

    \n
  • \n
  • \n

    GitHub Container Registry (github-container-registry) -\n ghcr.io\n

    \n
  • \n
  • \n

    Microsoft Azure Container Registry (azure-container-registry) -\n .azurecr.io\n

    \n
  • \n
  • \n

    GitLab Container Registry (gitlab-container-registry) -\n registry.gitlab.com\n

    \n
  • \n
", + "smithy.api#documentation": "

The registry URL of the upstream public registry to use as the source for the pull\n through cache rule. The following is the syntax to use for each supported upstream\n registry.

\n
    \n
  • \n

    Amazon ECR Public (ecr-public) - public.ecr.aws\n

    \n
  • \n
  • \n

    Docker Hub (docker-hub) -\n registry-1.docker.io\n

    \n
  • \n
  • \n

    Quay (quay) - quay.io\n

    \n
  • \n
  • \n

    Kubernetes (k8s) - registry.k8s.io\n

    \n
  • \n
  • \n

    GitHub Container Registry (github-container-registry) -\n ghcr.io\n

    \n
  • \n
  • \n

    Microsoft Azure Container Registry (azure-container-registry) -\n .azurecr.io\n

    \n
  • \n
", "smithy.api#required": {} } }, @@ -2027,6 +2057,119 @@ ] } }, + "com.amazonaws.ecr#CreateRepositoryCreationTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#CreateRepositoryCreationTemplateRequest" + }, + "output": { + "target": "com.amazonaws.ecr#CreateRepositoryCreationTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#LimitExceededException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#TemplateAlreadyExistsException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a repository creation template. This template is used to define the settings\n for repositories created by Amazon ECR on your behalf. For example, repositories created\n through pull through cache actions. For more information, see Private\n repository creation templates in the\n Amazon Elastic Container Registry User Guide.

" + } + }, + "com.amazonaws.ecr#CreateRepositoryCreationTemplateRequest": { + "type": "structure", + "members": { + "prefix": { + "target": "com.amazonaws.ecr#Prefix", + "traits": { + "smithy.api#documentation": "

The repository namespace prefix to associate with the template. All repositories\n created using this namespace prefix will have the settings defined in this template\n applied. For example, a prefix of prod would apply to all repositories\n beginning with prod/. Similarly, a prefix of prod/team would\n apply to all repositories beginning with prod/team/.

\n

To apply a template to all repositories in your registry that don't have an associated\n creation template, you can use ROOT as the prefix.

\n \n

There is always an assumed / applied to the end of the prefix. If you\n specify ecr-public as the prefix, Amazon ECR treats that as\n ecr-public/. When using a pull through cache rule, the repository\n prefix you specify during rule creation is what you should specify as your\n repository creation template prefix as well.

\n
", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.ecr#RepositoryTemplateDescription", + "traits": { + "smithy.api#documentation": "

A description for the repository creation template.

" + } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.ecr#EncryptionConfigurationForRepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "

The encryption configuration to use for repositories created using the\n template.

" + } + }, + "resourceTags": { + "target": "com.amazonaws.ecr#TagList", + "traits": { + "smithy.api#documentation": "

The metadata to apply to the repository to help you categorize and organize. Each tag\n consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

" + } + }, + "imageTagMutability": { + "target": "com.amazonaws.ecr#ImageTagMutability", + "traits": { + "smithy.api#documentation": "

The tag mutability setting for the repository. If this parameter is omitted, the\n default setting of MUTABLE will be used which will allow image tags to be\n overwritten. If IMMUTABLE is specified, all image tags within the\n repository will be immutable which will prevent them from being overwritten.

" + } + }, + "repositoryPolicy": { + "target": "com.amazonaws.ecr#RepositoryPolicyText", + "traits": { + "smithy.api#documentation": "

The repository policy to apply to repositories created using the template. A\n repository policy is a permissions policy associated with a repository to control access\n permissions.

" + } + }, + "lifecyclePolicy": { + "target": "com.amazonaws.ecr#LifecyclePolicyTextForRepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "

The lifecycle policy to use for repositories created using the template.

" + } + }, + "appliedFor": { + "target": "com.amazonaws.ecr#RCTAppliedForList", + "traits": { + "smithy.api#documentation": "

A list of enumerable strings representing the Amazon ECR repository creation scenarios that\n this template will apply towards. The two supported scenarios are\n PULL_THROUGH_CACHE and REPLICATION\n

", + "smithy.api#required": {} + } + }, + "customRoleArn": { + "target": "com.amazonaws.ecr#CustomRoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as\n the registry that you are configuring. Amazon ECR will assume your supplied role when\n the customRoleArn is specified. When this field isn't specified, Amazon ECR will\n use the service-linked role for the repository creation template.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ecr#CreateRepositoryCreationTemplateResponse": { + "type": "structure", + "members": { + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

The registry ID associated with the request.

" + } + }, + "repositoryCreationTemplate": { + "target": "com.amazonaws.ecr#RepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "

The details of the repository creation template associated with the request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ecr#CreateRepositoryRequest": { "type": "structure", "members": { @@ -2099,6 +2242,15 @@ "smithy.api#pattern": "^arn:aws:secretsmanager:[a-zA-Z0-9-:]+:secret:ecr\\-pullthroughcache\\/[a-zA-Z0-9\\/_+=.@-]+$" } }, + "com.amazonaws.ecr#CustomRoleArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + } + } + }, "com.amazonaws.ecr#CvssScore": { "type": "structure", "members": { @@ -2472,6 +2624,67 @@ ] } }, + "com.amazonaws.ecr#DeleteRepositoryCreationTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#DeleteRepositoryCreationTemplateRequest" + }, + "output": { + "target": "com.amazonaws.ecr#DeleteRepositoryCreationTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#TemplateNotFoundException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a repository creation template.

" + } + }, + "com.amazonaws.ecr#DeleteRepositoryCreationTemplateRequest": { + "type": "structure", + "members": { + "prefix": { + "target": "com.amazonaws.ecr#Prefix", + "traits": { + "smithy.api#documentation": "

The repository namespace prefix associated with the repository creation\n template.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ecr#DeleteRepositoryCreationTemplateResponse": { + "type": "structure", + "members": { + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

The registry ID associated with the request.

" + } + }, + "repositoryCreationTemplate": { + "target": "com.amazonaws.ecr#RepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "

The details of the repository creation template that was deleted.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ecr#DeleteRepositoryPolicy": { "type": "operation", "input": { @@ -3056,7 +3269,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The ID of the registry.

" + "smithy.api#documentation": "

The registry ID associated with the request.

" } }, "replicationConfiguration": { @@ -3171,6 +3384,87 @@ "smithy.api#output": {} } }, + "com.amazonaws.ecr#DescribeRepositoryCreationTemplates": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#DescribeRepositoryCreationTemplatesRequest" + }, + "output": { + "target": "com.amazonaws.ecr#DescribeRepositoryCreationTemplatesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns details about the repository creation templates in a registry. The\n prefixes request parameter can be used to return the details for a\n specific repository creation template.

", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "repositoryCreationTemplates", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.ecr#DescribeRepositoryCreationTemplatesRequest": { + "type": "structure", + "members": { + "prefixes": { + "target": "com.amazonaws.ecr#PrefixList", + "traits": { + "smithy.api#documentation": "

The repository namespace prefixes associated with the repository creation templates to\n describe. If this value is not specified, all repository creation templates are\n returned.

" + } + }, + "nextToken": { + "target": "com.amazonaws.ecr#NextToken", + "traits": { + "smithy.api#documentation": "

The nextToken value returned from a previous paginated\n DescribeRepositoryCreationTemplates request where\n maxResults was used and the results exceeded the value of that\n parameter. Pagination continues from the end of the previous results that returned the\n nextToken value. This value is null when there are no more\n results to return.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" + } + }, + "maxResults": { + "target": "com.amazonaws.ecr#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of repository results returned by\n DescribeRepositoryCreationTemplatesRequest in paginated output. When\n this parameter is used, DescribeRepositoryCreationTemplatesRequest only\n returns maxResults results in a single page along with a\n nextToken response element. The remaining results of the initial\n request can be seen by sending another\n DescribeRepositoryCreationTemplatesRequest request with the returned\n nextToken value. This value can be between 1 and\n 1000. If this parameter is not used, then\n DescribeRepositoryCreationTemplatesRequest returns up to\n 100 results and a nextToken value, if applicable.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ecr#DescribeRepositoryCreationTemplatesResponse": { + "type": "structure", + "members": { + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

The registry ID associated with the request.

" + } + }, + "repositoryCreationTemplates": { + "target": "com.amazonaws.ecr#RepositoryCreationTemplateList", + "traits": { + "smithy.api#documentation": "

The details of the repository creation templates.

" + } + }, + "nextToken": { + "target": "com.amazonaws.ecr#NextToken", + "traits": { + "smithy.api#documentation": "

The nextToken value to include in a future\n DescribeRepositoryCreationTemplates request. When the results of a\n DescribeRepositoryCreationTemplates request exceed\n maxResults, this value can be used to retrieve the next page of\n results. This value is null when there are no more results to\n return.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ecr#EmptyUploadException": { "type": "structure", "members": { @@ -3192,7 +3486,7 @@ "encryptionType": { "target": "com.amazonaws.ecr#EncryptionType", "traits": { - "smithy.api#documentation": "

The encryption type to use.

\n

If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created. For more\n information, see Protecting data using server-side\n encryption with an KMS key stored in Key Management Service (SSE-KMS) in the\n Amazon Simple Storage Service Console Developer Guide.

\n

If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES-256 encryption algorithm. For more information, see Protecting data using\n server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the\n Amazon Simple Storage Service Console Developer Guide.

", + "smithy.api#documentation": "

The encryption type to use.

\n

If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created. For more\n information, see Protecting data using server-side\n encryption with an KMS key stored in Key Management Service (SSE-KMS) in the\n Amazon Simple Storage Service Console Developer Guide.

\n

If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES256 encryption algorithm. For more information, see Protecting data using\n server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the\n Amazon Simple Storage Service Console Developer Guide.

", "smithy.api#required": {} } }, @@ -3204,7 +3498,28 @@ } }, "traits": { - "smithy.api#documentation": "

The encryption configuration for the repository. This determines how the contents of\n your repository are encrypted at rest.

\n

By default, when no encryption configuration is set or the AES256\n encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption\n keys which encrypts your data at rest using an AES-256 encryption algorithm. This does\n not require any action on your part.

\n

For more control over the encryption of the contents of your repository, you can use\n server-side encryption with Key Management Service key stored in Key Management Service (KMS) to encrypt your\n images. For more information, see Amazon ECR encryption at\n rest in the Amazon Elastic Container Registry User Guide.

" + "smithy.api#documentation": "

The encryption configuration for the repository. This determines how the contents of\n your repository are encrypted at rest.

\n

By default, when no encryption configuration is set or the AES256\n encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption\n keys which encrypts your data at rest using an AES256 encryption algorithm. This does\n not require any action on your part.

\n

For more control over the encryption of the contents of your repository, you can use\n server-side encryption with Key Management Service key stored in Key Management Service (KMS) to encrypt your\n images. For more information, see Amazon ECR encryption at\n rest in the Amazon Elastic Container Registry User Guide.

" + } + }, + "com.amazonaws.ecr#EncryptionConfigurationForRepositoryCreationTemplate": { + "type": "structure", + "members": { + "encryptionType": { + "target": "com.amazonaws.ecr#EncryptionType", + "traits": { + "smithy.api#documentation": "

The encryption type to use.

\n

If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created. For more\n information, see Protecting data using server-side\n encryption with an KMS key stored in Key Management Service (SSE-KMS) in the\n Amazon Simple Storage Service Console Developer Guide.

\n

If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES256 encryption algorithm. For more information, see Protecting data using\n server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the\n Amazon Simple Storage Service Console Developer Guide.

", + "smithy.api#required": {} + } + }, + "kmsKey": { + "target": "com.amazonaws.ecr#KmsKeyForRepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "

If you use the KMS encryption type, specify the KMS key to use for\n encryption. The full ARN of the KMS key must be specified. The key must exist in the\n same Region as the repository. If no key is specified, the default Amazon Web Services managed KMS\n key for Amazon ECR will be used.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The encryption configuration to associate with the repository creation\n template.

" } }, "com.amazonaws.ecr#EncryptionType": { @@ -3409,6 +3724,64 @@ "smithy.api#default": false } }, + "com.amazonaws.ecr#GetAccountSetting": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#GetAccountSettingRequest" + }, + "output": { + "target": "com.amazonaws.ecr#GetAccountSettingResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the basic scan type version name.

" + } + }, + "com.amazonaws.ecr#GetAccountSettingRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.ecr#AccountSettingName", + "traits": { + "smithy.api#documentation": "

Basic scan type version name.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ecr#GetAccountSettingResponse": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.ecr#AccountSettingName", + "traits": { + "smithy.api#documentation": "

Retrieves the basic scan type version name.

" + } + }, + "value": { + "target": "com.amazonaws.ecr#AccountSettingName", + "traits": { + "smithy.api#documentation": "

Retrieves the value that specifies what basic scan type is being used:\n AWS_NATIVE or CLAIR.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ecr#GetAuthorizationToken": { "type": "operation", "input": { @@ -3831,7 +4204,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The ID of the registry.

" + "smithy.api#documentation": "

The registry ID associated with the request.

" } }, "policyText": { @@ -3881,7 +4254,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The ID of the registry.

" + "smithy.api#documentation": "

The registry ID associated with the request.

" } }, "scanningConfiguration": { @@ -4726,6 +5099,16 @@ } } }, + "com.amazonaws.ecr#KmsKeyForRepositoryCreationTemplate": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^$|arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key\\/[a-z0-9-]+$" + } + }, "com.amazonaws.ecr#Layer": { "type": "structure", "members": { @@ -5091,6 +5474,15 @@ } } }, + "com.amazonaws.ecr#LifecyclePolicyTextForRepositoryCreationTemplate": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 30720 + } + } + }, "com.amazonaws.ecr#LifecyclePreviewMaxResults": { "type": "integer", "traits": { @@ -5401,6 +5793,22 @@ "com.amazonaws.ecr#Platform": { "type": "string" }, + "com.amazonaws.ecr#Prefix": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*/?|ROOT)$" + } + }, + "com.amazonaws.ecr#PrefixList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#Prefix" + } + }, "com.amazonaws.ecr#ProxyEndpoint": { "type": "string" }, @@ -5509,6 +5917,74 @@ "com.amazonaws.ecr#PushTimestamp": { "type": "timestamp" }, + "com.amazonaws.ecr#PutAccountSetting": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#PutAccountSettingRequest" + }, + "output": { + "target": "com.amazonaws.ecr#PutAccountSettingResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#LimitExceededException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Allows you to change the basic scan type version by setting the name\n parameter to either CLAIR or AWS_NATIVE.

" + } + }, + "com.amazonaws.ecr#PutAccountSettingRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.ecr#AccountSettingName", + "traits": { + "smithy.api#documentation": "

Basic scan type version name.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.ecr#AccountSettingValue", + "traits": { + "smithy.api#documentation": "

Setting value that determines what basic scan type is being used:\n AWS_NATIVE or CLAIR.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ecr#PutAccountSettingResponse": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.ecr#AccountSettingName", + "traits": { + "smithy.api#documentation": "

Retrieves the basic scan type version name.

" + } + }, + "value": { + "target": "com.amazonaws.ecr#AccountSettingValue", + "traits": { + "smithy.api#documentation": "

Retrieves the basic scan type value, either AWS_NATIVE or\n CLAIR.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ecr#PutImage": { "type": "operation", "input": { @@ -5894,7 +6370,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The registry ID.

" + "smithy.api#documentation": "

The registry ID associated with the request.

" } }, "policyText": { @@ -5985,7 +6461,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates the replication configuration for a registry. The existing\n replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the\n PutReplicationConfiguration API is called, a service-linked IAM role is created in\n your account for the replication process. For more information, see Using\n service-linked roles for Amazon ECR in the\n Amazon Elastic Container Registry User Guide.

\n \n

When configuring cross-account replication, the destination account must grant the\n source account permission to replicate. This permission is controlled using a\n registry permissions policy. For more information, see PutRegistryPolicy.

\n
" + "smithy.api#documentation": "

Creates or updates the replication configuration for a registry. The existing\n replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the\n PutReplicationConfiguration API is called, a service-linked IAM role is created in\n your account for the replication process. For more information, see Using\n service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide.\n For more information on the custom role for replication, see Creating an IAM role for replication.

\n \n

When configuring cross-account replication, the destination account must grant the\n source account permission to replicate. This permission is controlled using a\n registry permissions policy. For more information, see PutRegistryPolicy.

\n
" } }, "com.amazonaws.ecr#PutReplicationConfigurationRequest": { @@ -6017,6 +6493,29 @@ "smithy.api#output": {} } }, + "com.amazonaws.ecr#RCTAppliedFor": { + "type": "enum", + "members": { + "REPLICATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REPLICATION" + } + }, + "PULL_THROUGH_CACHE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PULL_THROUGH_CACHE" + } + } + } + }, + "com.amazonaws.ecr#RCTAppliedForList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#RCTAppliedFor" + } + }, "com.amazonaws.ecr#Reason": { "type": "string" }, @@ -6357,6 +6856,86 @@ "smithy.api#error": "client" } }, + "com.amazonaws.ecr#RepositoryCreationTemplate": { + "type": "structure", + "members": { + "prefix": { + "target": "com.amazonaws.ecr#Prefix", + "traits": { + "smithy.api#documentation": "

The repository namespace prefix associated with the repository creation\n template.

" + } + }, + "description": { + "target": "com.amazonaws.ecr#RepositoryTemplateDescription", + "traits": { + "smithy.api#documentation": "

The description associated with the repository creation template.

" + } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.ecr#EncryptionConfigurationForRepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "

The encryption configuration associated with the repository creation template.

" + } + }, + "resourceTags": { + "target": "com.amazonaws.ecr#TagList", + "traits": { + "smithy.api#documentation": "

The metadata to apply to the repository to help you categorize and organize. Each tag\n consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

" + } + }, + "imageTagMutability": { + "target": "com.amazonaws.ecr#ImageTagMutability", + "traits": { + "smithy.api#documentation": "

The tag mutability setting for the repository. If this parameter is omitted, the\n default setting of MUTABLE will be used which will allow image tags to be overwritten.\n If IMMUTABLE is specified, all image tags within the repository will be immutable which\n will prevent them from being overwritten.

" + } + }, + "repositoryPolicy": { + "target": "com.amazonaws.ecr#RepositoryPolicyText", + "traits": { + "smithy.api#documentation": "

The repository policy to apply to repositories created using the template. A repository\n policy is a permissions policy associated with a repository to control access\n permissions.

" + } + }, + "lifecyclePolicy": { + "target": "com.amazonaws.ecr#LifecyclePolicyTextForRepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "

The lifecycle policy to use for repositories created using the template.

" + } + }, + "appliedFor": { + "target": "com.amazonaws.ecr#RCTAppliedForList", + "traits": { + "smithy.api#documentation": "

A list of enumerable Strings representing the repository creation scenarios that this\n template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and\n REPLICATION.

" + } + }, + "customRoleArn": { + "target": "com.amazonaws.ecr#CustomRoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the role to be assumed by Amazon ECR. Amazon ECR will assume your supplied role when\n the customRoleArn is specified. When this field isn't specified, Amazon ECR will\n use the service-linked role for the repository creation template.

" + } + }, + "createdAt": { + "target": "com.amazonaws.ecr#Date", + "traits": { + "smithy.api#documentation": "

The date and time, in JavaScript date format, when the repository creation template\n was created.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.ecr#Date", + "traits": { + "smithy.api#documentation": "

The date and time, in JavaScript date format, when the repository creation template\n was last updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the repository creation template associated with the request.

" + } + }, + "com.amazonaws.ecr#RepositoryCreationTemplateList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#RepositoryCreationTemplate" + } + }, "com.amazonaws.ecr#RepositoryFilter": { "type": "structure", "members": { @@ -6571,6 +7150,15 @@ "target": "com.amazonaws.ecr#RepositoryScanningConfiguration" } }, + "com.amazonaws.ecr#RepositoryTemplateDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, "com.amazonaws.ecr#Resource": { "type": "structure", "members": { @@ -7290,6 +7878,30 @@ "target": "com.amazonaws.ecr#TagValue" } }, + "com.amazonaws.ecr#TemplateAlreadyExistsException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.ecr#ExceptionMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The repository creation template already exists. Specify a unique prefix and try\n again.

", + "smithy.api#error": "client" + } + }, + "com.amazonaws.ecr#TemplateNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.ecr#ExceptionMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The specified repository creation template can't be found. Verify the registry ID and\n prefix and try again.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.ecr#Title": { "type": "string" }, @@ -7533,6 +8145,112 @@ "smithy.api#output": {} } }, + "com.amazonaws.ecr#UpdateRepositoryCreationTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#UpdateRepositoryCreationTemplateRequest" + }, + "output": { + "target": "com.amazonaws.ecr#UpdateRepositoryCreationTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#TemplateNotFoundException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates an existing repository creation template.

" + } + }, + "com.amazonaws.ecr#UpdateRepositoryCreationTemplateRequest": { + "type": "structure", + "members": { + "prefix": { + "target": "com.amazonaws.ecr#Prefix", + "traits": { + "smithy.api#documentation": "

The repository namespace prefix that matches an existing repository creation template\n in the registry. All repositories created using this namespace prefix will have the\n settings defined in this template applied. For example, a prefix of prod\n would apply to all repositories beginning with prod/. This includes a\n repository named prod/team1 as well as a repository named\n prod/repository1.

\n

To apply a template to all repositories in your registry that don't have an associated\n creation template, you can use ROOT as the prefix.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.ecr#RepositoryTemplateDescription", + "traits": { + "smithy.api#documentation": "

A description for the repository creation template.

" + } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.ecr#EncryptionConfigurationForRepositoryCreationTemplate" + }, + "resourceTags": { + "target": "com.amazonaws.ecr#TagList", + "traits": { + "smithy.api#documentation": "

The metadata to apply to the repository to help you categorize and organize. Each tag\n consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

" + } + }, + "imageTagMutability": { + "target": "com.amazonaws.ecr#ImageTagMutability", + "traits": { + "smithy.api#documentation": "

Updates the tag mutability setting for the repository. If this parameter is omitted,\n the default setting of MUTABLE will be used which will allow image tags to\n be overwritten. If IMMUTABLE is specified, all image tags within the\n repository will be immutable which will prevent them from being overwritten.

" + } + }, + "repositoryPolicy": { + "target": "com.amazonaws.ecr#RepositoryPolicyText", + "traits": { + "smithy.api#documentation": "

Updates the repository policy created using the template. A repository policy is a\n permissions policy associated with a repository to control access permissions.

" + } + }, + "lifecyclePolicy": { + "target": "com.amazonaws.ecr#LifecyclePolicyTextForRepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "

Updates the lifecycle policy associated with the specified repository creation\n template.

" + } + }, + "appliedFor": { + "target": "com.amazonaws.ecr#RCTAppliedForList", + "traits": { + "smithy.api#documentation": "

Updates the list of enumerable strings representing the Amazon ECR repository creation\n scenarios that this template will apply towards. The two supported scenarios are\n PULL_THROUGH_CACHE and REPLICATION\n

" + } + }, + "customRoleArn": { + "target": "com.amazonaws.ecr#CustomRoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as\n the registry that you are configuring. Amazon ECR will assume your supplied role when\n the customRoleArn is specified. When this field isn't specified, Amazon ECR will\n use the service-linked role for the repository creation template.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ecr#UpdateRepositoryCreationTemplateResponse": { + "type": "structure", + "members": { + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

The registry ID associated with the request.

" + } + }, + "repositoryCreationTemplate": { + "target": "com.amazonaws.ecr#RepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "

The details of the repository creation template associated with the request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ecr#UpdatedTimestamp": { "type": "timestamp" }, diff --git a/models/ecs.json b/models/ecs.json index c9ef466b67..4bf7c79d66 100644 --- a/models/ecs.json +++ b/models/ecs.json @@ -1518,7 +1518,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object representing the networking details for a task or service. For example\n\t\t\t\tawsvpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}\n

" + "smithy.api#documentation": "

An object representing the networking details for a task or service. For example\n\t\t\t\tawsVpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}.

" } }, "com.amazonaws.ecs#BlockedException": { @@ -1681,7 +1681,7 @@ } }, "traits": { - "smithy.api#documentation": "

The details of a capacity provider strategy. A capacity provider strategy can be set\n\t\t\twhen using the RunTask or CreateCluster APIs or as\n\t\t\tthe default capacity provider strategy for a cluster with the CreateCluster API.

\n

Only capacity providers that are already associated with a cluster and have an\n\t\t\t\tACTIVE or UPDATING status can be used in a capacity\n\t\t\tprovider strategy. The PutClusterCapacityProviders API is used to\n\t\t\tassociate a capacity provider with a cluster.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be used in a\n\t\t\tcapacity provider strategy.

\n

With FARGATE_SPOT, you can run interruption\n\t\t\ttolerant tasks at a rate that's discounted compared to the FARGATE price.\n\t\t\t\tFARGATE_SPOT runs tasks on spare compute capacity. When Amazon Web Services needs the\n\t\t\tcapacity back, your tasks are interrupted with a two-minute warning.\n\t\t\t\tFARGATE_SPOT only supports Linux tasks with the X86_64 architecture on\n\t\t\tplatform version 1.3.0 or later.

\n

A capacity provider strategy may contain a maximum of 6 capacity providers.

" + "smithy.api#documentation": "

The details of a capacity provider strategy. A capacity provider strategy can be set\n\t\t\twhen using the RunTask or CreateCluster APIs or as\n\t\t\tthe default capacity provider strategy for a cluster with the CreateCluster API.

\n

Only capacity providers that are already associated with a cluster and have an\n\t\t\t\tACTIVE or UPDATING status can be used in a capacity\n\t\t\tprovider strategy. The PutClusterCapacityProviders API is used to\n\t\t\tassociate a capacity provider with a cluster.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be used in a\n\t\t\tcapacity provider strategy.

\n

With FARGATE_SPOT, you can run interruption tolerant tasks at a rate\n\t\t\tthat's discounted compared to the FARGATE price. FARGATE_SPOT\n\t\t\truns tasks on spare compute capacity. When Amazon Web Services needs the capacity back, your tasks are\n\t\t\tinterrupted with a two-minute warning. FARGATE_SPOT only supports Linux\n\t\t\ttasks with the X86_64 architecture on platform version 1.3.0 or later.

\n

A capacity provider strategy may contain a maximum of 6 capacity providers.

" } }, "com.amazonaws.ecs#CapacityProviderStrategyItemBase": { @@ -1762,7 +1762,7 @@ } }, "traits": { - "smithy.api#documentation": "

These errors are usually caused by a client action. This client action might be using\n\t\t\tan action or resource on behalf of a user that doesn't have permissions to use the\n\t\t\taction or resource. Or, it might be specifying an identifier that isn't valid.

", + "smithy.api#documentation": "

These errors are usually caused by a client action. This client action might be using\n\t\t\tan action or resource on behalf of a user that doesn't have permissions to use the\n\t\t\taction or resource. Or, it might be specifying an identifier that isn't valid.

\n

The following list includes additional causes for the error:

\n
    \n
  • \n

    The RunTask could not be processed because you use managed\n\t\t\t\t\tscaling and there is a capacity error because the quota of tasks in the\n\t\t\t\t\t\tPROVISIONING per cluster has been reached. For information\n\t\t\t\t\tabout the service quotas, see Amazon ECS\n\t\t\t\t\t\tservice quotas.

    \n
  • \n
", "smithy.api#error": "client" } }, @@ -2279,13 +2279,13 @@ "name": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of a container. If you're linking multiple containers together in a task\n\t\t\tdefinition, the name of one container can be entered in the\n\t\t\t\tlinks of another container to connect the containers.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--name option to docker\n\t\t\trun.

" + "smithy.api#documentation": "

The name of a container. If you're linking multiple containers together in a task\n\t\t\tdefinition, the name of one container can be entered in the\n\t\t\t\tlinks of another container to connect the containers.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in the docker create-container command and the\n\t\t\t\t--name option to docker\n\t\t\trun.

" } }, "image": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The image used to start a container. This string is passed directly to the Docker\n\t\t\tdaemon. By default, images in the Docker Hub registry are available. Other repositories\n\t\t\tare specified with either \n repository-url/image:tag\n or \n repository-url/image@digest\n . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\tIMAGE parameter of docker\n\t\t\t\trun.

\n
    \n
  • \n

    When a new task starts, the Amazon ECS container agent pulls the latest version of\n\t\t\t\t\tthe specified image and tag for the container to use. However, subsequent\n\t\t\t\t\tupdates to a repository image aren't propagated to already running tasks.

    \n
  • \n
  • \n

    Images in Amazon ECR repositories can be specified by either using the full\n\t\t\t\t\t\tregistry/repository:tag or\n\t\t\t\t\t\tregistry/repository@digest. For example,\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/:latest\n\t\t\t\t\tor\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.\n\t\t\t\t

    \n
  • \n
  • \n

    Images in official repositories on Docker Hub use a single name (for example,\n\t\t\t\t\t\tubuntu or mongo).

    \n
  • \n
  • \n

    Images in other repositories on Docker Hub are qualified with an organization\n\t\t\t\t\tname (for example, amazon/amazon-ecs-agent).

    \n
  • \n
  • \n

    Images in other online repositories are qualified further by a domain name\n\t\t\t\t\t(for example, quay.io/assemblyline/ubuntu).

    \n
  • \n
" + "smithy.api#documentation": "

The image used to start a container. This string is passed directly to the Docker\n\t\t\tdaemon. By default, images in the Docker Hub registry are available. Other repositories\n\t\t\tare specified with either \n repository-url/image:tag\n or \n repository-url/image@digest\n . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker create-container command and the\n\t\t\t\tIMAGE parameter of docker\n\t\t\t\trun.

\n
    \n
  • \n

    When a new task starts, the Amazon ECS container agent pulls the latest version of\n\t\t\t\t\tthe specified image and tag for the container to use. However, subsequent\n\t\t\t\t\tupdates to a repository image aren't propagated to already running tasks.

    \n
  • \n
  • \n

    Images in Amazon ECR repositories can be specified by either using the full\n\t\t\t\t\t\tregistry/repository:tag or\n\t\t\t\t\t\tregistry/repository@digest. For example,\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/:latest\n\t\t\t\t\tor\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.\n\t\t\t\t

    \n
  • \n
  • \n

    Images in official repositories on Docker Hub use a single name (for example,\n\t\t\t\t\t\tubuntu or mongo).

    \n
  • \n
  • \n

    Images in other repositories on Docker Hub are qualified with an organization\n\t\t\t\t\tname (for example, amazon/amazon-ecs-agent).

    \n
  • \n
  • \n

    Images in other online repositories are qualified further by a domain name\n\t\t\t\t\t(for example, quay.io/assemblyline/ubuntu).

    \n
  • \n
" } }, "repositoryCredentials": { @@ -2298,31 +2298,31 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of cpu units reserved for the container. This parameter maps\n\t\t\tto CpuShares in the Create a container section of the\n\t\t\tDocker Remote API and the --cpu-shares option to docker run.

\n

This field is optional for tasks using the Fargate launch type, and the\n\t\t\tonly requirement is that the total amount of CPU reserved for all containers within a\n\t\t\ttask be lower than the task-level cpu value.

\n \n

You can determine the number of CPU units that are available per EC2 instance type\n\t\t\t\tby multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page\n\t\t\t\tby 1,024.

\n
\n

Linux containers share unallocated CPU units with other containers on the container\n\t\t\tinstance with the same ratio as their allocated amount. For example, if you run a\n\t\t\tsingle-container task on a single-core instance type with 512 CPU units specified for\n\t\t\tthat container, and that's the only task running on the container instance, that\n\t\t\tcontainer could use the full 1,024 CPU unit share at any given time. However, if you\n\t\t\tlaunched another copy of the same task on that container instance, each task is\n\t\t\tguaranteed a minimum of 512 CPU units when needed. Moreover, each container could float\n\t\t\tto higher CPU usage if the other container was not using it. If both tasks were 100%\n\t\t\tactive all of the time, they would be limited to 512 CPU units.

\n

On Linux container instances, the Docker daemon on the container instance uses the CPU\n\t\t\tvalue to calculate the relative CPU share ratios for running containers. For more\n\t\t\tinformation, see CPU share\n\t\t\t\tconstraint in the Docker documentation. The minimum valid CPU share value\n\t\t\tthat the Linux kernel allows is 2. However, the CPU parameter isn't required, and you\n\t\t\tcan use CPU values below 2 in your container definitions. For CPU values below 2\n\t\t\t(including null), the behavior varies based on your Amazon ECS container agent\n\t\t\tversion:

\n
    \n
  • \n

    \n Agent versions less than or equal to 1.1.0:\n\t\t\t\t\tNull and zero CPU values are passed to Docker as 0, which Docker then converts\n\t\t\t\t\tto 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux\n\t\t\t\t\tkernel converts to two CPU shares.

    \n
  • \n
  • \n

    \n Agent versions greater than or equal to 1.2.0:\n\t\t\t\t\tNull, zero, and CPU values of 1 are passed to Docker as 2.

    \n
  • \n
\n

On Windows container instances, the CPU limit is enforced as an absolute limit, or a\n\t\t\tquota. Windows containers only have access to the specified amount of CPU that's\n\t\t\tdescribed in the task definition. A null or zero CPU value is passed to Docker as\n\t\t\t\t0, which Windows interprets as 1% of one CPU.

" + "smithy.api#documentation": "

The number of cpu units reserved for the container. This parameter maps\n\t\t\tto CpuShares in the docker create-container command and the --cpu-shares option to docker run.

\n

This field is optional for tasks using the Fargate launch type, and the\n\t\t\tonly requirement is that the total amount of CPU reserved for all containers within a\n\t\t\ttask be lower than the task-level cpu value.

\n \n

You can determine the number of CPU units that are available per EC2 instance type\n\t\t\t\tby multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page\n\t\t\t\tby 1,024.

\n
\n

Linux containers share unallocated CPU units with other containers on the container\n\t\t\tinstance with the same ratio as their allocated amount. For example, if you run a\n\t\t\tsingle-container task on a single-core instance type with 512 CPU units specified for\n\t\t\tthat container, and that's the only task running on the container instance, that\n\t\t\tcontainer could use the full 1,024 CPU unit share at any given time. However, if you\n\t\t\tlaunched another copy of the same task on that container instance, each task is\n\t\t\tguaranteed a minimum of 512 CPU units when needed. Moreover, each container could float\n\t\t\tto higher CPU usage if the other container was not using it. If both tasks were 100%\n\t\t\tactive all of the time, they would be limited to 512 CPU units.

\n

On Linux container instances, the Docker daemon on the container instance uses the CPU\n\t\t\tvalue to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value\n\t\t\tthat the Linux kernel allows is 2, and the\n\t\t\tmaximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you\n\t\t\tcan use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2\n\t\t\t(including null) or above 262144, the behavior varies based on your Amazon ECS container agent\n\t\t\tversion:

\n
    \n
  • \n

    \n Agent versions less than or equal to 1.1.0:\n\t\t\t\t\tNull and zero CPU values are passed to Docker as 0, which Docker then converts\n\t\t\t\t\tto 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux\n\t\t\t\t\tkernel converts to two CPU shares.

    \n
  • \n
  • \n

    \n Agent versions greater than or equal to 1.2.0:\n\t\t\t\t\tNull, zero, and CPU values of 1 are passed to Docker as 2.

    \n
  • \n
  • \n

    \n Agent versions greater than or equal to\n\t\t\t\t\t\t1.84.0: CPU values greater than 256 vCPU are passed to Docker as\n\t\t\t\t\t256, which is equivalent to 262144 CPU shares.

    \n
  • \n
\n

On Windows container instances, the CPU limit is enforced as an absolute limit, or a\n\t\t\tquota. Windows containers only have access to the specified amount of CPU that's\n\t\t\tdescribed in the task definition. A null or zero CPU value is passed to Docker as\n\t\t\t\t0, which Windows interprets as 1% of one CPU.

" } }, "memory": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The amount (in MiB) of memory to present to the container. If your container attempts\n\t\t\tto exceed the memory specified here, the container is killed. The total amount of memory\n\t\t\treserved for all containers within a task must be lower than the task\n\t\t\t\tmemory value, if one is specified. This parameter maps to\n\t\t\t\tMemory in the Create a container section of the\n\t\t\tDocker Remote API and the --memory option to docker run.

\n

If using the Fargate launch type, this parameter is optional.

\n

If using the EC2 launch type, you must specify either a task-level\n\t\t\tmemory value or a container-level memory value. If you specify both a container-level\n\t\t\t\tmemory and memoryReservation value, memory\n\t\t\tmust be greater than memoryReservation. If you specify\n\t\t\t\tmemoryReservation, then that value is subtracted from the available\n\t\t\tmemory resources for the container instance where the container is placed. Otherwise,\n\t\t\tthe value of memory is used.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.

" + "smithy.api#documentation": "

The amount (in MiB) of memory to present to the container. If your container attempts\n\t\t\tto exceed the memory specified here, the container is killed. The total amount of memory\n\t\t\treserved for all containers within a task must be lower than the task\n\t\t\t\tmemory value, if one is specified. This parameter maps to\n\t\t\tMemory in the docker create-container command and the --memory option to docker run.

\n

If using the Fargate launch type, this parameter is optional.

\n

If using the EC2 launch type, you must specify either a task-level\n\t\t\tmemory value or a container-level memory value. If you specify both a container-level\n\t\t\t\tmemory and memoryReservation value, memory\n\t\t\tmust be greater than memoryReservation. If you specify\n\t\t\t\tmemoryReservation, then that value is subtracted from the available\n\t\t\tmemory resources for the container instance where the container is placed. Otherwise,\n\t\t\tthe value of memory is used.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.

" } }, "memoryReservation": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is\n\t\t\tunder heavy contention, Docker attempts to keep the container memory to this soft limit.\n\t\t\tHowever, your container can consume more memory when it needs to, up to either the hard\n\t\t\tlimit specified with the memory parameter (if applicable), or all of the\n\t\t\tavailable memory on the container instance, whichever comes first. This parameter maps\n\t\t\tto MemoryReservation in the Create a container section of\n\t\t\tthe Docker Remote API and the --memory-reservation option to docker run.

\n

If a task-level memory value is not specified, you must specify a non-zero integer for\n\t\t\tone or both of memory or memoryReservation in a container\n\t\t\tdefinition. If you specify both, memory must be greater than\n\t\t\t\tmemoryReservation. If you specify memoryReservation, then\n\t\t\tthat value is subtracted from the available memory resources for the container instance\n\t\t\twhere the container is placed. Otherwise, the value of memory is\n\t\t\tused.

\n

For example, if your container normally uses 128 MiB of memory, but occasionally\n\t\t\tbursts to 256 MiB of memory for short periods of time, you can set a\n\t\t\t\tmemoryReservation of 128 MiB, and a memory hard limit of\n\t\t\t300 MiB. This configuration would allow the container to only reserve 128 MiB of memory\n\t\t\tfrom the remaining resources on the container instance, but also allow the container to\n\t\t\tconsume more memory resources when needed.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.

" + "smithy.api#documentation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is\n\t\t\tunder heavy contention, Docker attempts to keep the container memory to this soft limit.\n\t\t\tHowever, your container can consume more memory when it needs to, up to either the hard\n\t\t\tlimit specified with the memory parameter (if applicable), or all of the\n\t\t\tavailable memory on the container instance, whichever comes first. This parameter maps\n\t\t\tto MemoryReservation in the docker create-container command and the --memory-reservation option to docker run.

\n

If a task-level memory value is not specified, you must specify a non-zero integer for\n\t\t\tone or both of memory or memoryReservation in a container\n\t\t\tdefinition. If you specify both, memory must be greater than\n\t\t\t\tmemoryReservation. If you specify memoryReservation, then\n\t\t\tthat value is subtracted from the available memory resources for the container instance\n\t\t\twhere the container is placed. Otherwise, the value of memory is\n\t\t\tused.

\n

For example, if your container normally uses 128 MiB of memory, but occasionally\n\t\t\tbursts to 256 MiB of memory for short periods of time, you can set a\n\t\t\t\tmemoryReservation of 128 MiB, and a memory hard limit of\n\t\t\t300 MiB. This configuration would allow the container to only reserve 128 MiB of memory\n\t\t\tfrom the remaining resources on the container instance, but also allow the container to\n\t\t\tconsume more memory resources when needed.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.

" } }, "links": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The links parameter allows containers to communicate with each other\n\t\t\twithout the need for port mappings. This parameter is only supported if the network mode\n\t\t\tof a task definition is bridge. The name:internalName\n\t\t\tconstruct is analogous to name:alias in Docker links.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to\n\t\t\t\tLegacy container links\n\t\t\tin the Docker documentation. This parameter maps to Links in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--link option to docker\n\t\t\trun.

\n \n

This parameter is not supported for Windows containers.

\n
\n \n

Containers that are collocated on a single container instance may be able to\n\t\t\t\tcommunicate with each other without requiring links or host port mappings. Network\n\t\t\t\tisolation is achieved on the container instance using security groups and VPC\n\t\t\t\tsettings.

\n
" + "smithy.api#documentation": "

The links parameter allows containers to communicate with each other\n\t\t\twithout the need for port mappings. This parameter is only supported if the network mode\n\t\t\tof a task definition is bridge. The name:internalName\n\t\t\tconstruct is analogous to name:alias in Docker links.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to Links in the docker create-container command and the\n\t\t\t\t--link option to docker\n\t\t\trun.

\n \n

This parameter is not supported for Windows containers.

\n
\n \n

Containers that are collocated on a single container instance may be able to\n\t\t\t\tcommunicate with each other without requiring links or host port mappings. Network\n\t\t\t\tisolation is achieved on the container instance using security groups and VPC\n\t\t\t\tsettings.

\n
" } }, "portMappings": { "target": "com.amazonaws.ecs#PortMappingList", "traits": { - "smithy.api#documentation": "

The list of port mappings for the container. Port mappings allow containers to access\n\t\t\tports on the host container instance to send or receive traffic.

\n

For task definitions that use the awsvpc network mode, only specify the\n\t\t\t\tcontainerPort. The hostPort can be left blank or it must\n\t\t\tbe the same value as the containerPort.

\n

Port mappings on Windows use the NetNAT gateway address rather than\n\t\t\t\tlocalhost. There's no loopback for port mappings on Windows, so you\n\t\t\tcan't access a container's mapped port from the host itself.

\n

This parameter maps to PortBindings in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--publish option to docker\n\t\t\t\trun. If the network mode of a task definition is set to none,\n\t\t\tthen you can't specify port mappings. If the network mode of a task definition is set to\n\t\t\t\thost, then host ports must either be undefined or they must match the\n\t\t\tcontainer port in the port mapping.

\n \n

After a task reaches the RUNNING status, manual and automatic host\n\t\t\t\tand container port assignments are visible in the Network\n\t\t\t\t\tBindings section of a container description for a selected task in\n\t\t\t\tthe Amazon ECS console. The assignments are also visible in the\n\t\t\t\t\tnetworkBindings section DescribeTasks\n\t\t\t\tresponses.

\n
" + "smithy.api#documentation": "

The list of port mappings for the container. Port mappings allow containers to access\n\t\t\tports on the host container instance to send or receive traffic.

\n

For task definitions that use the awsvpc network mode, only specify the\n\t\t\t\tcontainerPort. The hostPort can be left blank or it must\n\t\t\tbe the same value as the containerPort.

\n

Port mappings on Windows use the NetNAT gateway address rather than\n\t\t\t\tlocalhost. There's no loopback for port mappings on Windows, so you\n\t\t\tcan't access a container's mapped port from the host itself.

\n

This parameter maps to PortBindings in the\n\t\t\tdocker create-container command and the\n\t\t\t\t--publish option to docker\n\t\t\t\trun. If the network mode of a task definition is set to none,\n\t\t\tthen you can't specify port mappings. If the network mode of a task definition is set to\n\t\t\t\thost, then host ports must either be undefined or they must match the\n\t\t\tcontainer port in the port mapping.

\n \n

After a task reaches the RUNNING status, manual and automatic host\n\t\t\t\tand container port assignments are visible in the Network\n\t\t\t\t\tBindings section of a container description for a selected task in\n\t\t\t\tthe Amazon ECS console. The assignments are also visible in the\n\t\t\t\t\tnetworkBindings section DescribeTasks\n\t\t\t\tresponses.

\n
" } }, "essential": { @@ -2331,40 +2331,46 @@ "smithy.api#documentation": "

If the essential parameter of a container is marked as true,\n\t\t\tand that container fails or stops for any reason, all other containers that are part of\n\t\t\tthe task are stopped. If the essential parameter of a container is marked\n\t\t\tas false, its failure doesn't affect the rest of the containers in a task.\n\t\t\tIf this parameter is omitted, a container is assumed to be essential.

\n

All tasks must have at least one essential container. If you have an application\n\t\t\tthat's composed of multiple containers, group containers that are used for a common\n\t\t\tpurpose into components, and separate the different components into multiple task\n\t\t\tdefinitions. For more information, see Application\n\t\t\t\tArchitecture in the Amazon Elastic Container Service Developer Guide.

" } }, + "restartPolicy": { + "target": "com.amazonaws.ecs#ContainerRestartPolicy", + "traits": { + "smithy.api#documentation": "

The restart policy for a container. When you set up a restart policy, Amazon ECS can restart the container without needing to replace the\n\t\t\ttask. For more information, see Restart individual containers in Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide.

" + } + }, "entryPoint": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "\n

Early versions of the Amazon ECS container agent don't properly handle\n\t\t\t\t\tentryPoint parameters. If you have problems using\n\t\t\t\t\tentryPoint, update your container agent or enter your commands and\n\t\t\t\targuments as command array items instead.

\n
\n

The entry point that's passed to the container. This parameter maps to\n\t\t\t\tEntrypoint in the Create a container section of the\n\t\t\tDocker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.

" + "smithy.api#documentation": "\n

Early versions of the Amazon ECS container agent don't properly handle\n\t\t\t\t\tentryPoint parameters. If you have problems using\n\t\t\t\t\tentryPoint, update your container agent or enter your commands and\n\t\t\t\targuments as command array items instead.

\n
\n

The entry point that's passed to the container. This parameter maps to\n\t\t\tEntrypoint in the docker create-container command and the --entrypoint option to docker run.

" } }, "command": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The command that's passed to the container. This parameter maps to Cmd in\n\t\t\tthe Create a container section of the Docker Remote API and the\n\t\t\t\tCOMMAND parameter to docker\n\t\t\t\trun. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each\n\t\t\targument is a separated string in the array.

" + "smithy.api#documentation": "

The command that's passed to the container. This parameter maps to Cmd in\n\t\t\tthe docker create-container command and the\n\t\t\t\tCOMMAND parameter to docker\n\t\t\t\trun. If there are multiple arguments, each\n\t\t\targument is a separated string in the array.

" } }, "environment": { "target": "com.amazonaws.ecs#EnvironmentVariables", "traits": { - "smithy.api#documentation": "

The environment variables to pass to a container. This parameter maps to\n\t\t\t\tEnv in the Create a container section of the\n\t\t\tDocker Remote API and the --env option to docker run.

\n \n

We don't recommend that you use plaintext environment variables for sensitive\n\t\t\t\tinformation, such as credential data.

\n
" + "smithy.api#documentation": "

The environment variables to pass to a container. This parameter maps to\n\t\t\tEnv in the docker create-container command and the --env option to docker run.

\n \n

We don't recommend that you use plaintext environment variables for sensitive\n\t\t\t\tinformation, such as credential data.

\n
" } }, "environmentFiles": { "target": "com.amazonaws.ecs#EnvironmentFiles", "traits": { - "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container. This\n\t\t\tparameter maps to the --env-file option to docker run.

\n

You can specify up to ten environment files. The file must have a .env\n\t\t\tfile extension. Each line in an environment file contains an environment variable in\n\t\t\t\tVARIABLE=VALUE format. Lines beginning with # are treated\n\t\t\tas comments and are ignored. For more information about the environment variable file\n\t\t\tsyntax, see Declare default\n\t\t\t\tenvironment variables in file.

\n

If there are environment variables specified using the environment\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Specifying Environment\n\t\t\t\tVariables in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container. This\n\t\t\tparameter maps to the --env-file option to docker run.

\n

You can specify up to ten environment files. The file must have a .env\n\t\t\tfile extension. Each line in an environment file contains an environment variable in\n\t\t\t\tVARIABLE=VALUE format. Lines beginning with # are treated\n\t\t\tas comments and are ignored.

\n

If there are environment variables specified using the environment\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Specifying Environment\n\t\t\t\tVariables in the Amazon Elastic Container Service Developer Guide.

" } }, "mountPoints": { "target": "com.amazonaws.ecs#MountPointList", "traits": { - "smithy.api#documentation": "

The mount points for data volumes in your container.

\n

This parameter maps to Volumes in the Create a container\n\t\t\tsection of the Docker Remote API and the --volume option to docker run.

\n

Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives.

" + "smithy.api#documentation": "

The mount points for data volumes in your container.

\n

This parameter maps to Volumes in the docker create-container command and the --volume option to docker run.

\n

Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives.

" } }, "volumesFrom": { "target": "com.amazonaws.ecs#VolumeFromList", "traits": { - "smithy.api#documentation": "

Data volumes to mount from another container. This parameter maps to\n\t\t\t\tVolumesFrom in the Create a container section of the\n\t\t\tDocker Remote API and the --volumes-from option to docker run.

" + "smithy.api#documentation": "

Data volumes to mount from another container. This parameter maps to\n\t\t\tVolumesFrom in the docker create-container command and the --volumes-from option to docker run.

" } }, "linuxParameters": { @@ -2388,7 +2394,7 @@ "startTimeout": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

Time duration (in seconds) to wait before giving up on resolving dependencies for a\n\t\t\tcontainer. For example, you specify two containers in a task definition with containerA\n\t\t\thaving a dependency on containerB reaching a COMPLETE,\n\t\t\tSUCCESS, or HEALTHY status. If a startTimeout\n\t\t\tvalue is specified for containerB and it doesn't reach the desired status within that\n\t\t\ttime then containerA gives up and not start. This results in the task transitioning to a\n\t\t\t\tSTOPPED state.

\n \n

When the ECS_CONTAINER_START_TIMEOUT container agent configuration\n\t\t\t\tvariable is used, it's enforced independently from this start timeout value.

\n
\n

For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

For tasks using the EC2 launch type, your container instances require at\n\t\t\tleast version 1.26.0 of the container agent to use a container start\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1 of the ecs-init\n\t\t\tpackage. If your container instances are launched from version 20190301 or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init. For more information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

\n

The valid values are 2-120 seconds.

" + "smithy.api#documentation": "

Time duration (in seconds) to wait before giving up on resolving dependencies for a\n\t\t\tcontainer. For example, you specify two containers in a task definition with containerA\n\t\t\thaving a dependency on containerB reaching a COMPLETE,\n\t\t\tSUCCESS, or HEALTHY status. If a startTimeout\n\t\t\tvalue is specified for containerB and it doesn't reach the desired status within that\n\t\t\ttime then containerA gives up and not start. This results in the task transitioning to a\n\t\t\t\tSTOPPED state.

\n \n

When the ECS_CONTAINER_START_TIMEOUT container agent configuration\n\t\t\t\tvariable is used, it's enforced independently from this start timeout value.

\n
\n

For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

For tasks using the EC2 launch type, your container instances require at\n\t\t\tleast version 1.26.0 of the container agent to use a container start\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1 of the ecs-init\n\t\t\tpackage. If your container instances are launched from version 20190301 or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init. For more information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

\n

The valid values for Fargate are 2-120 seconds.

" } }, "stopTimeout": { @@ -2400,103 +2406,103 @@ "hostname": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The hostname to use for your container. This parameter maps to Hostname\n\t\t\tin the Create a container section of the Docker Remote API and the\n\t\t\t\t--hostname option to docker\n\t\t\t\trun.

\n \n

The hostname parameter is not supported if you're using the\n\t\t\t\t\tawsvpc network mode.

\n
" + "smithy.api#documentation": "

The hostname to use for your container. This parameter maps to Hostname\n\t\t\tin the docker create-container command and the\n\t\t\t\t--hostname option to docker\n\t\t\t\trun.

\n \n

The hostname parameter is not supported if you're using the\n\t\t\t\t\tawsvpc network mode.

\n
" } }, "user": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The user to use inside the container. This parameter maps to User in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--user option to docker\n\t\t\trun.

\n \n

When running tasks using the host network mode, don't run containers\n\t\t\t\tusing the root user (UID 0). We recommend using a non-root user for better\n\t\t\t\tsecurity.

\n
\n

You can specify the user using the following formats. If specifying a UID\n\t\t\tor GID, you must specify it as a positive integer.

\n
    \n
  • \n

    \n user\n

    \n
  • \n
  • \n

    \n user:group\n

    \n
  • \n
  • \n

    \n uid\n

    \n
  • \n
  • \n

    \n uid:gid\n

    \n
  • \n
  • \n

    \n user:gid\n

    \n
  • \n
  • \n

    \n uid:group\n

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

The user to use inside the container. This parameter maps to User in the docker create-container command and the\n\t\t\t\t--user option to docker\n\t\t\trun.

\n \n

When running tasks using the host network mode, don't run containers\n\t\t\t\tusing the root user (UID 0). We recommend using a non-root user for better\n\t\t\t\tsecurity.

\n
\n

You can specify the user using the following formats. If specifying a UID\n\t\t\tor GID, you must specify it as a positive integer.

\n
    \n
  • \n

    \n user\n

    \n
  • \n
  • \n

    \n user:group\n

    \n
  • \n
  • \n

    \n uid\n

    \n
  • \n
  • \n

    \n uid:gid\n

    \n
  • \n
  • \n

    \n user:gid\n

    \n
  • \n
  • \n

    \n uid:group\n

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
" } }, "workingDirectory": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The working directory to run commands inside the container in. This parameter maps to\n\t\t\t\tWorkingDir in the Create a container section of the\n\t\t\tDocker Remote API and the --workdir option to docker run.

" + "smithy.api#documentation": "

The working directory to run commands inside the container in. This parameter maps to\n\t\t\tWorkingDir in the docker create-container command and the --workdir option to docker run.

" } }, "disableNetworking": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, networking is off within the container. This parameter\n\t\t\tmaps to NetworkDisabled in the Create a container section\n\t\t\tof the Docker Remote API.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

When this parameter is true, networking is off within the container. This parameter\n\t\t\tmaps to NetworkDisabled in the docker create-container command.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "privileged": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, the container is given elevated privileges on the host\n\t\t\tcontainer instance (similar to the root user). This parameter maps to\n\t\t\t\tPrivileged in the Create a container section of the\n\t\t\tDocker Remote API and the --privileged option to docker run.

\n \n

This parameter is not supported for Windows containers or tasks run on Fargate.

\n
" + "smithy.api#documentation": "

When this parameter is true, the container is given elevated privileges on the host\n\t\t\tcontainer instance (similar to the root user). This parameter maps to\n\t\t\tPrivileged in the docker create-container command and the --privileged option to docker run.

\n \n

This parameter is not supported for Windows containers or tasks run on Fargate.

\n
" } }, "readonlyRootFilesystem": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, the container is given read-only access to its root file\n\t\t\tsystem. This parameter maps to ReadonlyRootfs in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--read-only option to docker\n\t\t\t\trun.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

When this parameter is true, the container is given read-only access to its root file\n\t\t\tsystem. This parameter maps to ReadonlyRootfs in the docker create-container command and the\n\t\t\t\t--read-only option to docker\n\t\t\t\trun.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "dnsServers": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of DNS servers that are presented to the container. This parameter maps to\n\t\t\t\tDns in the Create a container section of the\n\t\t\tDocker Remote API and the --dns option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

A list of DNS servers that are presented to the container. This parameter maps to\n\t\t\tDns in the docker create-container command and the --dns option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "dnsSearchDomains": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of DNS search domains that are presented to the container. This parameter maps\n\t\t\tto DnsSearch in the Create a container section of the\n\t\t\tDocker Remote API and the --dns-search option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

A list of DNS search domains that are presented to the container. This parameter maps\n\t\t\tto DnsSearch in the docker create-container command and the --dns-search option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "extraHosts": { "target": "com.amazonaws.ecs#HostEntryList", "traits": { - "smithy.api#documentation": "

A list of hostnames and IP address mappings to append to the /etc/hosts\n\t\t\tfile on the container. This parameter maps to ExtraHosts in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--add-host option to docker\n\t\t\t\trun.

\n \n

This parameter isn't supported for Windows containers or tasks that use the\n\t\t\t\t\tawsvpc network mode.

\n
" + "smithy.api#documentation": "

A list of hostnames and IP address mappings to append to the /etc/hosts\n\t\t\tfile on the container. This parameter maps to ExtraHosts in the docker create-container command and the\n\t\t\t\t--add-host option to docker\n\t\t\t\trun.

\n \n

This parameter isn't supported for Windows containers or tasks that use the\n\t\t\t\t\tawsvpc network mode.

\n
" } }, "dockerSecurityOptions": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of strings to provide custom configuration for multiple security systems. For\n\t\t\tmore information about valid values, see Docker\n\t\t\t\tRun Security Configuration. This field isn't valid for containers in tasks\n\t\t\tusing the Fargate launch type.

\n

For Linux tasks on EC2, this parameter can be used to reference custom\n\t\t\tlabels for SELinux and AppArmor multi-level security systems.

\n

For any tasks on EC2, this parameter can be used to reference a\n\t\t\tcredential spec file that configures a container for Active Directory authentication.\n\t\t\tFor more information, see Using gMSAs for Windows\n\t\t\t\tContainers and Using gMSAs for Linux\n\t\t\t\tContainers in the Amazon Elastic Container Service Developer Guide.

\n

This parameter maps to SecurityOpt in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--security-opt option to docker\n\t\t\t\trun.

\n \n

The Amazon ECS container agent running on a container instance must register with the\n\t\t\t\t\tECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true\n\t\t\t\tenvironment variables before containers placed on that instance can use these\n\t\t\t\tsecurity options. For more information, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.

\n
\n

For more information about valid values, see Docker\n\t\t\t\tRun Security Configuration.

\n

Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" |\n\t\t\t\"credentialspec:CredentialSpecFilePath\"

" + "smithy.api#documentation": "

A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks\n\t\t\tusing the Fargate launch type.

\n

For Linux tasks on EC2, this parameter can be used to reference custom\n\t\t\tlabels for SELinux and AppArmor multi-level security systems.

\n

For any tasks on EC2, this parameter can be used to reference a\n\t\t\tcredential spec file that configures a container for Active Directory authentication.\n\t\t\tFor more information, see Using gMSAs for Windows\n\t\t\t\tContainers and Using gMSAs for Linux\n\t\t\t\tContainers in the Amazon Elastic Container Service Developer Guide.

\n

This parameter maps to SecurityOpt in the docker create-container command and the\n\t\t\t\t--security-opt option to docker\n\t\t\t\trun.

\n \n

The Amazon ECS container agent running on a container instance must register with the\n\t\t\t\t\tECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true\n\t\t\t\tenvironment variables before containers placed on that instance can use these\n\t\t\t\tsecurity options. For more information, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.

\n
\n

Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" |\n\t\t\t\"credentialspec:CredentialSpecFilePath\"

" } }, "interactive": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, you can deploy containerized applications\n\t\t\tthat require stdin or a tty to be allocated. This parameter\n\t\t\tmaps to OpenStdin in the Create a container section of the\n\t\t\tDocker Remote API and the --interactive option to docker run.

" + "smithy.api#documentation": "

When this parameter is true, you can deploy containerized applications\n\t\t\tthat require stdin or a tty to be allocated. This parameter\n\t\t\tmaps to OpenStdin in the docker create-container command and the --interactive option to docker run.

" } }, "pseudoTerminal": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, a TTY is allocated. This parameter maps to\n\t\t\t\tTty in the Create a container section of the\n\t\t\tDocker Remote API and the --tty option to docker run.

" + "smithy.api#documentation": "

When this parameter is true, a TTY is allocated. This parameter maps to\n\t\t\tTty in the docker create-container command and the --tty option to docker run.

" } }, "dockerLabels": { "target": "com.amazonaws.ecs#DockerLabelsMap", "traits": { - "smithy.api#documentation": "

A key/value map of labels to add to the container. This parameter maps to\n\t\t\t\tLabels in the Create a container section of the\n\t\t\tDocker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

" + "smithy.api#documentation": "

A key/value map of labels to add to the container. This parameter maps to\n\t\t\tLabels in the docker create-container command and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

" } }, "ulimits": { "target": "com.amazonaws.ecs#UlimitList", "traits": { - "smithy.api#documentation": "

A list of ulimits to set in the container. If a ulimit value\n\t\t\tis specified in a task definition, it overrides the default values set by Docker. This\n\t\t\tparameter maps to Ulimits in the Create a container section\n\t\t\tof the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed\n\t\t\tin the Ulimit data type.

\n

Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile soft limit is 1024 and the default hard limit\n\t\t\t\t\t\t\tis 65535.

\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

A list of ulimits to set in the container. If a ulimit value\n\t\t\tis specified in a task definition, it overrides the default values set by Docker. This\n\t\t\tparameter maps to Ulimits in the docker create-container command and the --ulimit option to docker run. Valid naming values are displayed\n\t\t\tin the Ulimit data type.

\n

Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile soft limit is 65535 and the default hard limit\n\t\t\t\t\t\t\tis 65535.

\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "logConfiguration": { "target": "com.amazonaws.ecs#LogConfiguration", "traits": { - "smithy.api#documentation": "

The log configuration specification for the container.

\n

This parameter maps to LogConfig in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--log-driver option to docker\n\t\t\t\trun. By default, containers use the same logging driver that the Docker\n\t\t\tdaemon uses. However the container can use a different logging driver than the Docker\n\t\t\tdaemon by specifying a log driver with this parameter in the container definition. To\n\t\t\tuse a different logging driver for a container, the log system must be configured\n\t\t\tproperly on the container instance (or on a different log server for remote logging\n\t\t\toptions). For more information about the options for different supported log drivers,\n\t\t\tsee Configure\n\t\t\t\tlogging drivers in the Docker documentation.

\n \n

Amazon ECS currently supports a subset of the logging drivers available to the Docker\n\t\t\t\tdaemon (shown in the LogConfiguration data type). Additional log\n\t\t\t\tdrivers may be available in future releases of the Amazon ECS container agent.

\n
\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

The Amazon ECS container agent running on a container instance must register the\n\t\t\t\tlogging drivers available on that instance with the\n\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS environment variable before\n\t\t\t\tcontainers placed on that instance can use these log configuration options. For more\n\t\t\t\tinformation, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.

\n
" + "smithy.api#documentation": "

The log configuration specification for the container.

\n

This parameter maps to LogConfig in the docker create-container command and the\n\t\t\t\t--log-driver option to docker\n\t\t\t\trun. By default, containers use the same logging driver that the Docker\n\t\t\tdaemon uses. However the container can use a different logging driver than the Docker\n\t\t\tdaemon by specifying a log driver with this parameter in the container definition. To\n\t\t\tuse a different logging driver for a container, the log system must be configured\n\t\t\tproperly on the container instance (or on a different log server for remote logging\n\t\t\toptions).

\n \n

Amazon ECS currently supports a subset of the logging drivers available to the Docker\n\t\t\t\tdaemon (shown in the LogConfiguration data type). Additional log\n\t\t\t\tdrivers may be available in future releases of the Amazon ECS container agent.

\n
\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

The Amazon ECS container agent running on a container instance must register the\n\t\t\t\tlogging drivers available on that instance with the\n\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS environment variable before\n\t\t\t\tcontainers placed on that instance can use these log configuration options. For more\n\t\t\t\tinformation, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.

\n
" } }, "healthCheck": { "target": "com.amazonaws.ecs#HealthCheck", "traits": { - "smithy.api#documentation": "

The container health check command and associated configuration parameters for the\n\t\t\tcontainer. This parameter maps to HealthCheck in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\tHEALTHCHECK parameter of docker\n\t\t\t\trun.

" + "smithy.api#documentation": "

The container health check command and associated configuration parameters for the\n\t\t\tcontainer. This parameter maps to HealthCheck in the docker create-container command and the\n\t\t\t\tHEALTHCHECK parameter of docker\n\t\t\t\trun.

" } }, "systemControls": { "target": "com.amazonaws.ecs#SystemControls", "traits": { - "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls in the Create a container section of the\n\t\t\tDocker Remote API and the --sysctl option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time setting to maintain longer lived\n\t\t\tconnections.

" + "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\tSysctls in the docker create-container command and the --sysctl option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time setting to maintain longer lived\n\t\t\tconnections.

" } }, "resourceRequirements": { @@ -2822,6 +2828,33 @@ "target": "com.amazonaws.ecs#ContainerOverride" } }, + "com.amazonaws.ecs#ContainerRestartPolicy": { + "type": "structure", + "members": { + "enabled": { + "target": "com.amazonaws.ecs#BoxedBoolean", + "traits": { + "smithy.api#documentation": "

Specifies whether a restart policy is enabled for the\n\t\t\tcontainer.

", + "smithy.api#required": {} + } + }, + "ignoredExitCodes": { + "target": "com.amazonaws.ecs#IntegerList", + "traits": { + "smithy.api#documentation": "

A list of exit codes that Amazon ECS will ignore and not attempt a restart on. You can specify a maximum of 50 container exit\n\t\t\tcodes. By default, Amazon ECS does not ignore\n\t\t\tany exit codes.

" + } + }, + "restartAttemptPeriod": { + "target": "com.amazonaws.ecs#BoxedInteger", + "traits": { + "smithy.api#documentation": "

A period of time (in seconds) that the container must run for before a restart can be attempted. A container can be\n\t\t\trestarted only once every restartAttemptPeriod seconds. If a container isn't able to run for this time period and exits early, it will not be restarted. You can set a minimum\n\t\t\trestartAttemptPeriod of 60 seconds and a maximum restartAttemptPeriod of 1800 seconds.\n\t\t\tBy default, a container must run for 300 seconds before it can be restarted.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

You can enable a restart policy for each container defined in your\n\t\t\ttask definition, to overcome transient failures faster and maintain task availability. When you\n\t\t\tenable a restart policy for a container, Amazon ECS can restart the container if it exits, without needing to replace\n\t\t\tthe task. For more information, see Restart individual containers\n\t\t\t\tin Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide.

" + } + }, "com.amazonaws.ecs#ContainerStateChange": { "type": "structure", "members": { @@ -3103,7 +3136,7 @@ } ], "traits": { - "smithy.api#documentation": "

Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

In addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. volumeConfigurations is only supported for REPLICA\n\t\t\tservice and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

Tasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING state and are reported as\n\t\t\thealthy by the load balancer.

\n

There are two service scheduler strategies available:

\n
    \n
  • \n

    \n REPLICA - The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n DAEMON - The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
\n

You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent is 0%.

\n

If a service uses the ECS deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.

\n

If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING or\n\t\t\t\tPENDING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.

\n

If a service uses either the CODE_DEPLOY or EXTERNAL\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING state.\n\t\t\tThis is while the container instances are in the DRAINING state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.

\n

When creating a service that uses the EXTERNAL deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

\n

When the service scheduler launches new tasks, it determines task placement. For information\n\t\t\tabout task placement and task placement strategies, see Amazon ECS\n\t\t\t\ttask placement in the Amazon Elastic Container Service Developer Guide\n

\n

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

", + "smithy.api#documentation": "

Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

In addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. volumeConfigurations is only supported for REPLICA\n\t\t\tservice and not DAEMON service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

Tasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING state and are reported as\n\t\t\thealthy by the load balancer.

\n

There are two service scheduler strategies available:

\n
    \n
  • \n

    \n REPLICA - The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n DAEMON - The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
\n

You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent is 0%.

\n

If a service uses the ECS deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.

\n

If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING or\n\t\t\t\tPENDING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.

\n

If a service uses either the CODE_DEPLOY or EXTERNAL\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING state.\n\t\t\tThis is while the container instances are in the DRAINING state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.

\n

When creating a service that uses the EXTERNAL deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

\n

When the service scheduler launches new tasks, it determines task placement. For\n\t\t\tinformation about task placement and task placement strategies, see Amazon ECS\n\t\t\t\ttask placement in the Amazon Elastic Container Service Developer Guide\n

\n

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

", "smithy.api#examples": [ { "title": "To create a new service", @@ -3262,7 +3295,7 @@ "launchType": { "target": "com.amazonaws.ecs#LaunchType", "traits": { - "smithy.api#documentation": "

The infrastructure that you run your service on. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.

\n

The FARGATE launch type runs your tasks on Fargate On-Demand\n\t\t\tinfrastructure.

\n \n

Fargate Spot infrastructure is available for use but a capacity provider\n\t\t\t\tstrategy must be used. For more information, see Fargate capacity providers in the\n\t\t\t\t\tAmazon ECS Developer Guide.

\n
\n

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.

\n

The EXTERNAL launch type runs your tasks on your on-premises server or\n\t\t\tvirtual machine (VM) capacity registered to your cluster.

\n

A service can use either a launch type or a capacity provider strategy. If a\n\t\t\t\tlaunchType is specified, the capacityProviderStrategy\n\t\t\tparameter must be omitted.

" + "smithy.api#documentation": "

The infrastructure that you run your service on. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.

\n

The FARGATE launch type runs your tasks on Fargate On-Demand\n\t\t\tinfrastructure.

\n \n

Fargate Spot infrastructure is available for use but a capacity provider\n\t\t\t\tstrategy must be used. For more information, see Fargate capacity providers in the Amazon ECS\n\t\t\t\t\tDeveloper Guide.

\n
\n

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.

\n

The EXTERNAL launch type runs your tasks on your on-premises server or\n\t\t\tvirtual machine (VM) capacity registered to your cluster.

\n

A service can use either a launch type or a capacity provider strategy. If a\n\t\t\t\tlaunchType is specified, the capacityProviderStrategy\n\t\t\tparameter must be omitted.

" } }, "capacityProviderStrategy": { @@ -3274,7 +3307,7 @@ "platformVersion": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The platform version that your tasks in the service are running on. A platform version\n\t\t\tis specified only for tasks using the Fargate launch type. If one isn't\n\t\t\tspecified, the LATEST platform version is used. For more information, see\n\t\t\t\tFargate platform versions in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The platform version that your tasks in the service are running on. A platform version\n\t\t\tis specified only for tasks using the Fargate launch type. If one isn't\n\t\t\tspecified, the LATEST platform version is used. For more information, see\n\t\t\t\tFargate platform\n\t\t\t\tversions in the Amazon Elastic Container Service Developer Guide.

" } }, "role": { @@ -3310,7 +3343,7 @@ "healthCheckGracePeriodSeconds": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing target health checks after a task has first started. This is only used when your\n\t\t\tservice is configured to use a load balancer. If your service has a load balancer\n\t\t\tdefined and you don't specify a health check grace period value, the default value of\n\t\t\t\t0 is used.

\n

If you do not use an Elastic Load Balancing, we recommend that you use the startPeriod in\n\t\t\tthe task definition health check parameters. For more information, see Health\n\t\t\t\tcheck.

\n

If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can\n\t\t\tspecify a health check grace period of up to 2,147,483,647 seconds (about 69 years).\n\t\t\tDuring that time, the Amazon ECS service scheduler ignores health check status. This grace\n\t\t\tperiod can prevent the service scheduler from marking tasks as unhealthy and stopping\n\t\t\tthem before they have time to come up.

" + "smithy.api#documentation": "

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing target health checks after a task has first started. This is only used when your\n\t\t\tservice is configured to use a load balancer. If your service has a load balancer\n\t\t\tdefined and you don't specify a health check grace period value, the default value of\n\t\t\t\t0 is used.

\n

If you do not use an Elastic Load Balancing, we recommend that you use the startPeriod in\n\t\t\tthe task definition health check parameters. For more information, see Health\n\t\t\t\tcheck.

\n

If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you\n\t\t\tcan specify a health check grace period of up to 2,147,483,647 seconds (about 69 years).\n\t\t\tDuring that time, the Amazon ECS service scheduler ignores health check status. This grace\n\t\t\tperiod can prevent the service scheduler from marking tasks as unhealthy and stopping\n\t\t\tthem before they have time to come up.

" } }, "schedulingStrategy": { @@ -3341,7 +3374,7 @@ "propagateTags": { "target": "com.amazonaws.ecs#PropagateTags", "traits": { - "smithy.api#documentation": "

Specifies whether to propagate the tags from the task definition to the task. If no\n\t\t\tvalue is specified, the tags aren't propagated. Tags can only be propagated to the task\n\t\t\tduring task creation. To add tags to a task after task creation, use the TagResource API action.

\n

You must set this to a value other than NONE when you use Cost Explorer. For more information, see Amazon ECS usage reports in the Amazon Elastic Container Service Developer Guide.

\n

The default is NONE.

" + "smithy.api#documentation": "

Specifies whether to propagate the tags from the task definition to the task. If no\n\t\t\tvalue is specified, the tags aren't propagated. Tags can only be propagated to the task\n\t\t\tduring task creation. To add tags to a task after task creation, use the TagResource API action.

\n

You must set this to a value other than NONE when you use Cost Explorer.\n\t\t\tFor more information, see Amazon ECS usage reports\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

\n

The default is NONE.

" } }, "enableExecuteCommand": { @@ -3426,7 +3459,7 @@ } ], "traits": { - "smithy.api#documentation": "

Create a task set in the specified cluster and service. This is used when a service\n\t\t\tuses the EXTERNAL deployment controller type. For more information, see\n\t\t\t\tAmazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

For information about the maximum number of task sets and otther quotas, see Amazon ECS\n\t\t\tservice quotas in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Create a task set in the specified cluster and service. This is used when a service\n\t\t\tuses the EXTERNAL deployment controller type. For more information, see\n\t\t\t\tAmazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

For information about the maximum number of task sets and other quotas, see Amazon ECS\n\t\t\tservice quotas in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#CreateTaskSetRequest": { @@ -3555,29 +3588,29 @@ "smithy.api#documentation": "

Disables an account setting for a specified user, role, or the root user for an\n\t\t\taccount.

", "smithy.api#examples": [ { - "title": "To delete your account setting", - "documentation": "This example deletes the account setting for your user for the specified resource type.", + "title": "To delete the account settings for a specific IAM user or IAM role", + "documentation": "This example deletes the account setting for a specific IAM user or IAM role for the specified resource type. Only the root user can view or modify the account settings for another user.", "input": { - "name": "serviceLongArnFormat" + "name": "containerInstanceLongArnFormat", + "principalArn": "arn:aws:iam:::user/principalName" }, "output": { "setting": { - "name": "serviceLongArnFormat", + "name": "containerInstanceLongArnFormat", "value": "enabled", "principalArn": "arn:aws:iam:::user/principalName" } } }, { - "title": "To delete the account settings for a specific IAM user or IAM role", - "documentation": "This example deletes the account setting for a specific IAM user or IAM role for the specified resource type. Only the root user can view or modify the account settings for another user.", + "title": "To delete your account setting", + "documentation": "This example deletes the account setting for your user for the specified resource type.", "input": { - "name": "containerInstanceLongArnFormat", - "principalArn": "arn:aws:iam:::user/principalName" + "name": "serviceLongArnFormat" }, "output": { "setting": { - "name": "containerInstanceLongArnFormat", + "name": "serviceLongArnFormat", "value": "enabled", "principalArn": "arn:aws:iam:::user/principalName" } @@ -4254,7 +4287,7 @@ "minimumHealthyPercent": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

If a service is using the rolling update (ECS) deployment type, the\n\t\t\t\tminimumHealthyPercent represents a lower limit on the number of your\n\t\t\tservice's tasks that must remain in the RUNNING state during a deployment,\n\t\t\tas a percentage of the desiredCount (rounded up to the nearest integer).\n\t\t\tThis parameter enables you to deploy without using additional cluster capacity. For\n\t\t\texample, if your service has a desiredCount of four tasks and a\n\t\t\t\tminimumHealthyPercent of 50%, the service scheduler may stop two\n\t\t\texisting tasks to free up cluster capacity before starting two new tasks.

\n

For services that do not use a load balancer, the following\n\t\t\tshould be noted:

\n
    \n
  • \n

    A service is considered healthy if all essential containers within the tasks\n\t\t\t\t\tin the service pass their health checks.

    \n
  • \n
  • \n

    If a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for 40 seconds after a task reaches a RUNNING\n\t\t\t\t\tstate before the task is counted towards the minimum healthy percent\n\t\t\t\t\ttotal.

    \n
  • \n
  • \n

    If a task has one or more essential containers with a health check defined,\n\t\t\t\t\tthe service scheduler will wait for the task to reach a healthy status before\n\t\t\t\t\tcounting it towards the minimum healthy percent total. A task is considered\n\t\t\t\t\thealthy when all essential containers within the task have passed their health\n\t\t\t\t\tchecks. The amount of time the service scheduler can wait for is determined by\n\t\t\t\t\tthe container health check settings.

    \n
  • \n
\n

For services that do use a load balancer, the following should be\n\t\t\tnoted:

\n
    \n
  • \n

    If a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for the load balancer target group health check to return a\n\t\t\t\t\thealthy status before counting the task towards the minimum healthy percent\n\t\t\t\t\ttotal.

    \n
  • \n
  • \n

    If a task has an essential container with a health check defined, the service\n\t\t\t\t\tscheduler will wait for both the task to reach a healthy status and the load\n\t\t\t\t\tbalancer target group health check to return a healthy status before counting\n\t\t\t\t\tthe task towards the minimum healthy percent total.

    \n
  • \n
\n

The default value for a replica service for\n\t\t\tminimumHealthyPercent is 100%. The default\n\t\t\tminimumHealthyPercent value for a service using\n\t\t\tthe DAEMON service schedule is 0% for the CLI,\n\t\t\tthe Amazon Web Services SDKs, and the APIs and 50% for the Amazon Web Services Management Console.

\n

The minimum number of healthy tasks during a deployment is the\n\t\t\tdesiredCount multiplied by the\n\t\t\tminimumHealthyPercent/100, rounded up to the\n\t\t\tnearest integer value.

\n

If a service is using either the blue/green (CODE_DEPLOY) or\n\t\t\t\tEXTERNAL deployment types and is running tasks that use the\n\t\t\tEC2 launch type, the minimum healthy\n\t\t\t\tpercent value is set to the default value and is used to define the lower\n\t\t\tlimit on the number of the tasks in the service that remain in the RUNNING\n\t\t\tstate while the container instances are in the DRAINING state. If a service\n\t\t\tis using either the blue/green (CODE_DEPLOY) or EXTERNAL\n\t\t\tdeployment types and is running tasks that use the Fargate launch type,\n\t\t\tthe minimum healthy percent value is not used, although it is returned when describing\n\t\t\tyour service.

" + "smithy.api#documentation": "

If a service is using the rolling update (ECS) deployment type, the\n\t\t\t\tminimumHealthyPercent represents a lower limit on the number of your\n\t\t\tservice's tasks that must remain in the RUNNING state during a deployment,\n\t\t\tas a percentage of the desiredCount (rounded up to the nearest integer).\n\t\t\tThis parameter enables you to deploy without using additional cluster capacity. For\n\t\t\texample, if your service has a desiredCount of four tasks and a\n\t\t\t\tminimumHealthyPercent of 50%, the service scheduler may stop two\n\t\t\texisting tasks to free up cluster capacity before starting two new tasks.

\n

For services that do not use a load balancer, the following\n\t\t\tshould be noted:

\n
    \n
  • \n

    A service is considered healthy if all essential containers within the tasks\n\t\t\t\t\tin the service pass their health checks.

    \n
  • \n
  • \n

    If a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for 40 seconds after a task reaches a RUNNING\n\t\t\t\t\tstate before the task is counted towards the minimum healthy percent\n\t\t\t\t\ttotal.

    \n
  • \n
  • \n

    If a task has one or more essential containers with a health check defined,\n\t\t\t\t\tthe service scheduler will wait for the task to reach a healthy status before\n\t\t\t\t\tcounting it towards the minimum healthy percent total. A task is considered\n\t\t\t\t\thealthy when all essential containers within the task have passed their health\n\t\t\t\t\tchecks. The amount of time the service scheduler can wait for is determined by\n\t\t\t\t\tthe container health check settings.

    \n
  • \n
\n

For services that do use a load balancer, the following should be\n\t\t\tnoted:

\n
    \n
  • \n

    If a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for the load balancer target group health check to return a\n\t\t\t\t\thealthy status before counting the task towards the minimum healthy percent\n\t\t\t\t\ttotal.

    \n
  • \n
  • \n

    If a task has an essential container with a health check defined, the service\n\t\t\t\t\tscheduler will wait for both the task to reach a healthy status and the load\n\t\t\t\t\tbalancer target group health check to return a healthy status before counting\n\t\t\t\t\tthe task towards the minimum healthy percent total.

    \n
  • \n
\n

The default value for a replica service for minimumHealthyPercent is\n\t\t\t100%. The default minimumHealthyPercent value for a service using the\n\t\t\t\tDAEMON service schedule is 0% for the CLI, the Amazon Web Services SDKs, and the\n\t\t\tAPIs and 50% for the Amazon Web Services Management Console.

\n

The minimum number of healthy tasks during a deployment is the\n\t\t\t\tdesiredCount multiplied by the minimumHealthyPercent/100,\n\t\t\trounded up to the nearest integer value.

\n

If a service is using either the blue/green (CODE_DEPLOY) or\n\t\t\t\tEXTERNAL deployment types and is running tasks that use the\n\t\t\tEC2 launch type, the minimum healthy\n\t\t\t\tpercent value is set to the default value and is used to define the lower\n\t\t\tlimit on the number of the tasks in the service that remain in the RUNNING\n\t\t\tstate while the container instances are in the DRAINING state. If a service\n\t\t\tis using either the blue/green (CODE_DEPLOY) or EXTERNAL\n\t\t\tdeployment types and is running tasks that use the Fargate launch type,\n\t\t\tthe minimum healthy percent value is not used, although it is returned when describing\n\t\t\tyour service.

" } }, "alarms": { @@ -4312,7 +4345,7 @@ "kmsKeyId": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

Specify an Key Management Service key ID to encrypt the ephemeral storage for deployment.

" + "smithy.api#documentation": "

Specify a Key Management Service key ID to encrypt the ephemeral storage for\n\t\t\tdeployment.

" } } }, @@ -5558,19 +5591,19 @@ "driver": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Docker volume driver to use. The driver value must match the driver name provided\n\t\t\tby Docker because it is used for task placement. If the driver was installed using the\n\t\t\tDocker plugin CLI, use docker plugin ls to retrieve the driver name from\n\t\t\tyour container instance. If the driver was installed using another method, use Docker\n\t\t\tplugin discovery to retrieve the driver name. For more information, see Docker\n\t\t\t\tplugin discovery. This parameter maps to Driver in the\n\t\t\tCreate a volume section of the Docker Remote API and the\n\t\t\t\txxdriver option to docker\n\t\t\t\tvolume create.

" + "smithy.api#documentation": "

The Docker volume driver to use. The driver value must match the driver name provided\n\t\t\tby Docker because it is used for task placement. If the driver was installed using the\n\t\t\tDocker plugin CLI, use docker plugin ls to retrieve the driver name from\n\t\t\tyour container instance. If the driver was installed using another method, use Docker\n\t\t\tplugin discovery to retrieve the driver name. This parameter maps to Driver in the docker create-container command and the\n\t\t\t\txxdriver option to docker\n\t\t\t\tvolume create.

" } }, "driverOpts": { "target": "com.amazonaws.ecs#StringMap", "traits": { - "smithy.api#documentation": "

A map of Docker driver-specific options passed through. This parameter maps to\n\t\t\t\tDriverOpts in the Create a volume section of the\n\t\t\tDocker Remote API and the xxopt option to docker\n\t\t\t\tvolume create.

" + "smithy.api#documentation": "

A map of Docker driver-specific options passed through. This parameter maps to\n\t\t\t\tDriverOpts in the docker create-volume command and the xxopt option to docker\n\t\t\t\tvolume create.

" } }, "labels": { "target": "com.amazonaws.ecs#StringMap", "traits": { - "smithy.api#documentation": "

Custom metadata to add to your Docker volume. This parameter maps to\n\t\t\t\tLabels in the Create a volume section of the\n\t\t\tDocker Remote API and the xxlabel option to docker\n\t\t\t\tvolume create.

" + "smithy.api#documentation": "

Custom metadata to add to your Docker volume. This parameter maps to\n\t\t\t\tLabels in the docker create-container command and the xxlabel option to docker\n\t\t\t\tvolume create.

" } } }, @@ -5755,13 +5788,13 @@ "type": { "target": "com.amazonaws.ecs#EnvironmentFileType", "traits": { - "smithy.api#documentation": "

The file type to use. Environment files are objects in Amazon S3. The only supported value is\n\t\t\t\ts3.

", + "smithy.api#documentation": "

The file type to use. Environment files are objects in Amazon S3. The only supported value\n\t\t\tis s3.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container. You can\n\t\t\tspecify up to ten environment files. The file must have a .env file\n\t\t\textension. Each line in an environment file should contain an environment variable in\n\t\t\t\tVARIABLE=VALUE format. Lines beginning with # are treated\n\t\t\tas comments and are ignored.

\n

If there are environment variables specified using the environment\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Use a file to pass environment variables to a container in the Amazon Elastic Container Service Developer Guide.

\n

Environment variable files are objects in Amazon S3 and all Amazon S3 security considerations apply.

\n

You must use the following platforms for the Fargate launch type:

\n
    \n
  • \n

    Linux platform version 1.4.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

Consider the following when using the Fargate launch type:

\n
    \n
  • \n

    The file is handled like a native Docker env-file.

    \n
  • \n
  • \n

    There is no support for shell escape handling.

    \n
  • \n
  • \n

    The container entry point interperts the VARIABLE values.

    \n
  • \n
" + "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container. You can\n\t\t\tspecify up to ten environment files. The file must have a .env file\n\t\t\textension. Each line in an environment file should contain an environment variable in\n\t\t\t\tVARIABLE=VALUE format. Lines beginning with # are treated\n\t\t\tas comments and are ignored.

\n

If there are environment variables specified using the environment\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Use a file to pass\n\t\t\t\tenvironment variables to a container in the Amazon Elastic Container Service Developer Guide.

\n

Environment variable files are objects in Amazon S3 and all Amazon S3 security considerations\n\t\t\tapply.

\n

You must use the following platforms for the Fargate launch type:

\n
    \n
  • \n

    Linux platform version 1.4.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

Consider the following when using the Fargate launch type:

\n
    \n
  • \n

    The file is handled like a native Docker env-file.

    \n
  • \n
  • \n

    There is no support for shell escape handling.

    \n
  • \n
  • \n

    The container entry point interprets the VARIABLE values.

    \n
  • \n
" } }, "com.amazonaws.ecs#EnvironmentFileType": { @@ -5794,7 +5827,7 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported\n\t\t\tvalue is 20 GiB and the maximum supported value is\n\t\t\t\t200 GiB.

", + "smithy.api#documentation": "

The total amount, in GiB, of ephemeral storage to set for the task. The minimum\n\t\t\tsupported value is 20 GiB and the maximum supported value is\n\t\t\t\t200 GiB.

", "smithy.api#required": {} } } @@ -6251,7 +6284,7 @@ "command": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A string array representing the command that the container runs to determine if it is\n\t\t\thealthy. The string array must start with CMD to run the command arguments\n\t\t\tdirectly, or CMD-SHELL to run the command with the container's default\n\t\t\tshell.

\n

When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list\n\t\t\tof commands in double quotes and brackets.

\n

\n [ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]\n

\n

You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.

\n

\n CMD-SHELL, curl -f http://localhost/ || exit 1\n

\n

An exit code of 0 indicates success, and non-zero exit code indicates failure. For\n\t\t\tmore information, see HealthCheck in the Create a container\n\t\t\tsection of the Docker Remote API.

", + "smithy.api#documentation": "

A string array representing the command that the container runs to determine if it is\n\t\t\thealthy. The string array must start with CMD to run the command arguments\n\t\t\tdirectly, or CMD-SHELL to run the command with the container's default\n\t\t\tshell.

\n

When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list\n\t\t\tof commands in double quotes and brackets.

\n

\n [ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]\n

\n

You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.

\n

\n CMD-SHELL, curl -f http://localhost/ || exit 1\n

\n

An exit code of 0 indicates success, and non-zero exit code indicates failure. For\n\t\t\tmore information, see HealthCheck in the docker create-container command.

", "smithy.api#required": {} } }, @@ -6281,7 +6314,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object representing a container health check. Health check parameters that are\n\t\t\tspecified in a container definition override any Docker health checks that exist in the\n\t\t\tcontainer image (such as those specified in a parent image or from the image's\n\t\t\tDockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

\n \n

The Amazon ECS container agent only monitors and reports on the health checks specified\n\t\t\t\tin the task definition. Amazon ECS does not monitor Docker health checks that are\n\t\t\t\tembedded in a container image and not specified in the container definition. Health\n\t\t\t\tcheck parameters that are specified in a container definition override any Docker\n\t\t\t\thealth checks that exist in the container image.

\n
\n

You can view the health status of both individual containers and a task with the\n\t\t\tDescribeTasks API operation or when viewing the task details in the console.

\n

The health check is designed to make sure that your containers survive agent restarts,\n\t\t\tupgrades, or temporary unavailability.

\n

Amazon ECS performs health checks on containers with the default that launched the\n\t\t\tcontainer instance or the task.

\n

The following describes the possible healthStatus values for a\n\t\t\tcontainer:

\n
    \n
  • \n

    \n HEALTHY-The container health check has passed\n\t\t\t\t\tsuccessfully.

    \n
  • \n
  • \n

    \n UNHEALTHY-The container health check has failed.

    \n
  • \n
  • \n

    \n UNKNOWN-The container health check is being evaluated,\n\t\t\t\t\tthere's no container health check defined, or Amazon ECS doesn't have the health\n\t\t\t\t\tstatus of the container.

    \n
  • \n
\n

The following describes the possible healthStatus values based on the\n\t\t\tcontainer health checker status of essential containers in the task with the following\n\t\t\tpriority order (high to low):

\n
    \n
  • \n

    \n UNHEALTHY-One or more essential containers have failed\n\t\t\t\t\ttheir health check.

    \n
  • \n
  • \n

    \n UNKNOWN-Any essential container running within the task is\n\t\t\t\t\tin an UNKNOWN state and no other essential containers have an\n\t\t\t\t\t\tUNHEALTHY state.

    \n
  • \n
  • \n

    \n HEALTHY-All essential containers within the task have\n\t\t\t\t\tpassed their health checks.

    \n
  • \n
\n

Consider the following task health example with 2 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tHEALTHY, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tthe task health is UNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY,\n\t\t\t\t\tthe task health is HEALTHY.

    \n
  • \n
\n

Consider the following task health example with 3 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, and Container3 is UNKNOWN, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, and Container3 is HEALTHY, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tHEALTHY, and Container3 is HEALTHY, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tand Container3 is HEALTHY, the task health is\n\t\t\t\t\tUNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tand Container3 is UNKNOWN, the task health is\n\t\t\t\t\tUNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY,\n\t\t\t\t\tand Container3 is HEALTHY, the task health is\n\t\t\t\t\tHEALTHY.

    \n
  • \n
\n

If a task is run manually, and not as part of a service, the task will continue its\n\t\t\tlifecycle regardless of its health status. For tasks that are part of a service, if the\n\t\t\ttask reports as unhealthy then the task will be stopped and the service scheduler will\n\t\t\treplace it.

\n

The following are notes about container health check support:

\n
    \n
  • \n

    If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this won't\n\t\t\t\t\tcause a container to transition to an UNHEALTHY status. This is by design,\n\t\t\t\t\tto ensure that containers remain running during agent restarts or temporary\n\t\t\t\t\tunavailability. The health check status is the \"last heard from\" response from the Amazon ECS\n\t\t\t\t\tagent, so if the container was considered HEALTHY prior to the disconnect,\n\t\t\t\t\tthat status will remain until the agent reconnects and another health check occurs.\n\t\t\t\t\tThere are no assumptions made about the status of the container health checks.

    \n
  • \n
  • \n

    Container health checks require version 1.17.0 or greater of the Amazon ECS\n\t\t\t\t\tcontainer agent. For more information, see Updating the\n\t\t\t\t\t\tAmazon ECS container agent.

    \n
  • \n
  • \n

    Container health checks are supported for Fargate tasks if\n\t\t\t\t\tyou're using platform version 1.1.0 or greater. For more\n\t\t\t\t\tinformation, see Fargate\n\t\t\t\t\t\tplatform versions.

    \n
  • \n
  • \n

    Container health checks aren't supported for tasks that are part of a service\n\t\t\t\t\tthat's configured to use a Classic Load Balancer.

    \n
  • \n
" + "smithy.api#documentation": "

An object representing a container health check. Health check parameters that are\n\t\t\tspecified in a container definition override any Docker health checks that exist in the\n\t\t\tcontainer image (such as those specified in a parent image or from the image's\n\t\t\tDockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

\n \n

The Amazon ECS container agent only monitors and reports on the health checks specified\n\t\t\t\tin the task definition. Amazon ECS does not monitor Docker health checks that are\n\t\t\t\tembedded in a container image and not specified in the container definition. Health\n\t\t\t\tcheck parameters that are specified in a container definition override any Docker\n\t\t\t\thealth checks that exist in the container image.

\n
\n

You can view the health status of both individual containers and a task with the\n\t\t\tDescribeTasks API operation or when viewing the task details in the console.

\n

The health check is designed to make sure that your containers survive agent restarts,\n\t\t\tupgrades, or temporary unavailability.

\n

Amazon ECS performs health checks on containers with the default that launched the\n\t\t\tcontainer instance or the task.

\n

The following describes the possible healthStatus values for a\n\t\t\tcontainer:

\n
    \n
  • \n

    \n HEALTHY-The container health check has passed\n\t\t\t\t\tsuccessfully.

    \n
  • \n
  • \n

    \n UNHEALTHY-The container health check has failed.

    \n
  • \n
  • \n

    \n UNKNOWN-The container health check is being evaluated,\n\t\t\t\t\tthere's no container health check defined, or Amazon ECS doesn't have the health\n\t\t\t\t\tstatus of the container.

    \n
  • \n
\n

The following describes the possible healthStatus values based on the\n\t\t\tcontainer health checker status of essential containers in the task with the following\n\t\t\tpriority order (high to low):

\n
    \n
  • \n

    \n UNHEALTHY-One or more essential containers have failed\n\t\t\t\t\ttheir health check.

    \n
  • \n
  • \n

    \n UNKNOWN-Any essential container running within the task is\n\t\t\t\t\tin an UNKNOWN state and no other essential containers have an\n\t\t\t\t\t\tUNHEALTHY state.

    \n
  • \n
  • \n

    \n HEALTHY-All essential containers within the task have\n\t\t\t\t\tpassed their health checks.

    \n
  • \n
\n

Consider the following task health example with 2 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tHEALTHY, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tthe task health is UNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY,\n\t\t\t\t\tthe task health is HEALTHY.

    \n
  • \n
\n

Consider the following task health example with 3 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, and Container3 is UNKNOWN, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, and Container3 is HEALTHY, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tHEALTHY, and Container3 is HEALTHY, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tand Container3 is HEALTHY, the task health is\n\t\t\t\t\tUNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tand Container3 is UNKNOWN, the task health is\n\t\t\t\t\tUNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY,\n\t\t\t\t\tand Container3 is HEALTHY, the task health is\n\t\t\t\t\tHEALTHY.

    \n
  • \n
\n

If a task is run manually, and not as part of a service, the task will continue its\n\t\t\tlifecycle regardless of its health status. For tasks that are part of a service, if the\n\t\t\ttask reports as unhealthy then the task will be stopped and the service scheduler will\n\t\t\treplace it.

\n

The following are notes about container health check support:

\n
    \n
  • \n

    If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this\n\t\t\t\t\twon't cause a container to transition to an UNHEALTHY status. This\n\t\t\t\t\tis by design, to ensure that containers remain running during agent restarts or\n\t\t\t\t\ttemporary unavailability. The health check status is the \"last heard from\"\n\t\t\t\t\tresponse from the Amazon ECS agent, so if the container was considered\n\t\t\t\t\t\tHEALTHY prior to the disconnect, that status will remain until\n\t\t\t\t\tthe agent reconnects and another health check occurs. There are no assumptions\n\t\t\t\t\tmade about the status of the container health checks.

    \n
  • \n
  • \n

    Container health checks require version 1.17.0 or greater of the\n\t\t\t\t\tAmazon ECS container agent. For more information, see Updating the\n\t\t\t\t\t\tAmazon ECS container agent.

    \n
  • \n
  • \n

    Container health checks are supported for Fargate tasks if\n\t\t\t\t\tyou're using platform version 1.1.0 or greater. For more\n\t\t\t\t\tinformation, see Fargate\n\t\t\t\t\t\tplatform versions.

    \n
  • \n
  • \n

    Container health checks aren't supported for tasks that are part of a service\n\t\t\t\t\tthat's configured to use a Classic Load Balancer.

    \n
  • \n
" } }, "com.amazonaws.ecs#HealthStatus": { @@ -6490,6 +6523,12 @@ "smithy.api#default": 0 } }, + "com.amazonaws.ecs#IntegerList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecs#BoxedInteger" + } + }, "com.amazonaws.ecs#InvalidParameterException": { "type": "structure", "members": { @@ -6534,18 +6573,18 @@ "add": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The Linux capabilities for the container that have been added to the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapAdd in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--cap-add option to docker\n\t\t\t\trun.

\n \n

Tasks launched on Fargate only support adding the SYS_PTRACE kernel\n\t\t\t\tcapability.

\n
\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" + "smithy.api#documentation": "

The Linux capabilities for the container that have been added to the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapAdd in the docker create-container command and the\n\t\t\t\t--cap-add option to docker\n\t\t\t\trun.

\n \n

Tasks launched on Fargate only support adding the SYS_PTRACE kernel\n\t\t\t\tcapability.

\n
\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" } }, "drop": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The Linux capabilities for the container that have been removed from the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapDrop in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--cap-drop option to docker\n\t\t\t\trun.

\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" + "smithy.api#documentation": "

The Linux capabilities for the container that have been removed from the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapDrop in the docker create-container command and the\n\t\t\t\t--cap-drop option to docker\n\t\t\t\trun.

\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" } } }, "traits": { - "smithy.api#documentation": "

The Linux capabilities to add or remove from the default Docker configuration for a container defined in the task definition. For more information about the default capabilities\n\t\t\tand the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run\n\t\t\t\treference. For more detailed information about these Linux capabilities,\n\t\t\tsee the capabilities(7) Linux manual page.

" + "smithy.api#documentation": "

The Linux capabilities to add or remove from the default Docker configuration for a container defined in the task definition. For more detailed information about these Linux capabilities,\n\t\t\tsee the capabilities(7) Linux manual page.

" } }, "com.amazonaws.ecs#KeyValuePair": { @@ -6618,7 +6657,7 @@ "devices": { "target": "com.amazonaws.ecs#DevicesList", "traits": { - "smithy.api#documentation": "

Any host devices to expose to the container. This parameter maps to\n\t\t\t\tDevices in the Create a container section of the\n\t\t\tDocker Remote API and the --device option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tdevices parameter isn't supported.

\n
" + "smithy.api#documentation": "

Any host devices to expose to the container. This parameter maps to\n\t\t\tDevices in the docker create-container command and the --device option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tdevices parameter isn't supported.

\n
" } }, "initProcessEnabled": { @@ -6630,13 +6669,13 @@ "sharedMemorySize": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The value for the size (in MiB) of the /dev/shm volume. This parameter\n\t\t\tmaps to the --shm-size option to docker\n\t\t\t\trun.

\n \n

If you are using tasks that use the Fargate launch type, the\n\t\t\t\t\tsharedMemorySize parameter is not supported.

\n
" + "smithy.api#documentation": "

The value for the size (in MiB) of the /dev/shm volume. This parameter\n\t\t\tmaps to the --shm-size option to docker\n\t\t\t\trun.

\n \n

If you are using tasks that use the Fargate launch type, the\n\t\t\t\t\tsharedMemorySize parameter is not supported.

\n
" } }, "tmpfs": { "target": "com.amazonaws.ecs#TmpfsList", "traits": { - "smithy.api#documentation": "

The container path, mount options, and size (in MiB) of the tmpfs mount. This\n\t\t\tparameter maps to the --tmpfs option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\ttmpfs parameter isn't supported.

\n
" + "smithy.api#documentation": "

The container path, mount options, and size (in MiB) of the tmpfs mount. This\n\t\t\tparameter maps to the --tmpfs option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\ttmpfs parameter isn't supported.

\n
" } }, "maxSwap": { @@ -6648,7 +6687,7 @@ "swappiness": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

This allows you to tune a container's memory swappiness behavior. A\n\t\t\t\tswappiness value of 0 will cause swapping to not happen\n\t\t\tunless absolutely necessary. A swappiness value of 100 will\n\t\t\tcause pages to be swapped very aggressively. Accepted values are whole numbers between\n\t\t\t\t0 and 100. If the swappiness parameter is not\n\t\t\tspecified, a default value of 60 is used. If a value is not specified for\n\t\t\t\tmaxSwap then this parameter is ignored. This parameter maps to the\n\t\t\t\t--memory-swappiness option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tswappiness parameter isn't supported.

\n

If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't\n\t\t\t\tsupported.

\n
" + "smithy.api#documentation": "

This allows you to tune a container's memory swappiness behavior. A\n\t\t\t\tswappiness value of 0 will cause swapping to not happen\n\t\t\tunless absolutely necessary. A swappiness value of 100 will\n\t\t\tcause pages to be swapped very aggressively. Accepted values are whole numbers between\n\t\t\t\t0 and 100. If the swappiness parameter is not\n\t\t\tspecified, a default value of 60 is used. If a value is not specified for\n\t\t\t\tmaxSwap then this parameter is ignored. This parameter maps to the\n\t\t\t\t--memory-swappiness option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tswappiness parameter isn't supported.

\n

If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't\n\t\t\t\tsupported.

\n
" } } }, @@ -6679,10 +6718,11 @@ "smithy.api#documentation": "

Lists the account settings for a specified principal.

", "smithy.api#examples": [ { - "title": "To view your effective account settings", - "documentation": "This example displays the effective account settings for your account.", + "title": "To view the effective account settings for a specific IAM user or IAM role", + "documentation": "This example displays the effective account settings for the specified user or role.", "input": { - "effectiveSettings": true + "effectiveSettings": true, + "principalArn": "arn:aws:iam:::user/principalName" }, "output": { "settings": [ @@ -6705,11 +6745,10 @@ } }, { - "title": "To view the effective account settings for a specific IAM user or IAM role", - "documentation": "This example displays the effective account settings for the specified user or role.", + "title": "To view your effective account settings", + "documentation": "This example displays the effective account settings for your account.", "input": { - "effectiveSettings": true, - "principalArn": "arn:aws:iam:::user/principalName" + "effectiveSettings": true }, "output": { "settings": [ @@ -6935,7 +6974,20 @@ "outputToken": "nextToken", "items": "clusterArns", "pageSize": "maxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListClustersSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.ecs#ListClustersRequest": { @@ -7358,25 +7410,25 @@ "smithy.api#documentation": "

Returns a list of task definition families that are registered to your account. This\n\t\t\tlist includes task definition families that no longer have any ACTIVE task\n\t\t\tdefinition revisions.

\n

You can filter out task definition families that don't contain any ACTIVE\n\t\t\ttask definition revisions by setting the status parameter to\n\t\t\t\tACTIVE. You can also filter the results with the\n\t\t\t\tfamilyPrefix parameter.

", "smithy.api#examples": [ { - "title": "To list your registered task definition families", - "documentation": "This example lists all of your registered task definition families.", + "title": "To filter your registered task definition families", + "documentation": "This example lists the task definition revisions that start with \"hpcc\".", + "input": { + "familyPrefix": "hpcc" + }, "output": { "families": [ - "node-js-app", - "web-timer", "hpcc", "hpcc-c4-8xlarge" ] } }, { - "title": "To filter your registered task definition families", - "documentation": "This example lists the task definition revisions that start with \"hpcc\".", - "input": { - "familyPrefix": "hpcc" - }, + "title": "To list your registered task definition families", + "documentation": "This example lists all of your registered task definition families.", "output": { "families": [ + "node-js-app", + "web-timer", "hpcc", "hpcc-c4-8xlarge" ] @@ -7466,12 +7518,13 @@ "smithy.api#documentation": "

Returns a list of task definitions that are registered to your account. You can filter\n\t\t\tthe results by family name with the familyPrefix parameter or by status\n\t\t\twith the status parameter.

", "smithy.api#examples": [ { - "title": "To list your registered task definitions", - "documentation": "This example lists all of your registered task definitions.", + "title": "To list the registered task definitions in a family", + "documentation": "This example lists the task definition revisions of a specified family.", + "input": { + "familyPrefix": "wordpress" + }, "output": { "taskDefinitionArns": [ - "arn:aws:ecs:us-east-1::task-definition/sleep300:2", - "arn:aws:ecs:us-east-1::task-definition/sleep360:1", "arn:aws:ecs:us-east-1::task-definition/wordpress:3", "arn:aws:ecs:us-east-1::task-definition/wordpress:4", "arn:aws:ecs:us-east-1::task-definition/wordpress:5", @@ -7480,13 +7533,12 @@ } }, { - "title": "To list the registered task definitions in a family", - "documentation": "This example lists the task definition revisions of a specified family.", - "input": { - "familyPrefix": "wordpress" - }, + "title": "To list your registered task definitions", + "documentation": "This example lists all of your registered task definitions.", "output": { "taskDefinitionArns": [ + "arn:aws:ecs:us-east-1::task-definition/sleep300:2", + "arn:aws:ecs:us-east-1::task-definition/sleep360:1", "arn:aws:ecs:us-east-1::task-definition/wordpress:3", "arn:aws:ecs:us-east-1::task-definition/wordpress:4", "arn:aws:ecs:us-east-1::task-definition/wordpress:5", @@ -7590,28 +7642,28 @@ "smithy.api#documentation": "

Returns a list of tasks. You can filter the results by cluster, task definition\n\t\t\tfamily, container instance, launch type, what IAM principal started the task, or by\n\t\t\tthe desired status of the task.

\n

Recently stopped tasks might appear in the returned results.

", "smithy.api#examples": [ { - "title": "To list the tasks in a cluster", - "documentation": "This example lists all of the tasks in a cluster.", + "title": "To list the tasks on a particular container instance", + "documentation": "This example lists the tasks of a specified container instance. Specifying a ``containerInstance`` value limits the results to tasks that belong to that container instance.", "input": { - "cluster": "default" + "cluster": "default", + "containerInstance": "f6bbb147-5370-4ace-8c73-c7181ded911f" }, "output": { "taskArns": [ - "arn:aws:ecs:us-east-1:012345678910:task/default/0cc43cdb-3bee-4407-9c26-c0e6ea5bee84", - "arn:aws:ecs:us-east-1:012345678910:task/default/6b809ef6-c67e-4467-921f-ee261c15a0a1" + "arn:aws:ecs:us-east-1:012345678910:task/default/0cc43cdb-3bee-4407-9c26-c0e6ea5bee84" ] } }, { - "title": "To list the tasks on a particular container instance", - "documentation": "This example lists the tasks of a specified container instance. Specifying a ``containerInstance`` value limits the results to tasks that belong to that container instance.", + "title": "To list the tasks in a cluster", + "documentation": "This example lists all of the tasks in a cluster.", "input": { - "cluster": "default", - "containerInstance": "f6bbb147-5370-4ace-8c73-c7181ded911f" + "cluster": "default" }, "output": { "taskArns": [ - "arn:aws:ecs:us-east-1:012345678910:task/default/0cc43cdb-3bee-4407-9c26-c0e6ea5bee84" + "arn:aws:ecs:us-east-1:012345678910:task/default/0cc43cdb-3bee-4407-9c26-c0e6ea5bee84", + "arn:aws:ecs:us-east-1:012345678910:task/default/6b809ef6-c67e-4467-921f-ee261c15a0a1" ] } } @@ -7750,7 +7802,7 @@ "logDriver": { "target": "com.amazonaws.ecs#LogDriver", "traits": { - "smithy.api#documentation": "

The log driver to use for the container.

\n

For tasks on Fargate, the supported log drivers are awslogs,\n\t\t\t\tsplunk, and awsfirelens.

\n

For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\tawslogs, fluentd, gelf,\n\t\t\t\tjson-file, journald,\n\t\t\t\tlogentries,syslog, splunk, and\n\t\t\t\tawsfirelens.

\n

For more information about using the awslogs log driver, see Using\n\t\t\t\tthe awslogs log driver in the Amazon Elastic Container Service Developer Guide.

\n

For more information about using the awsfirelens log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide.

\n \n

If you have a custom driver that isn't listed, you can fork the Amazon ECS container\n\t\t\t\tagent project that's available\n\t\t\t\t\ton GitHub and customize it to work with that driver. We encourage you to\n\t\t\t\tsubmit pull requests for changes that you would like to have included. However, we\n\t\t\t\tdon't currently provide support for running modified copies of this software.

\n
", + "smithy.api#documentation": "

The log driver to use for the container.

\n

For tasks on Fargate, the supported log drivers are awslogs,\n\t\t\t\tsplunk, and awsfirelens.

\n

For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\tawslogs, fluentd, gelf,\n\t\t\t\tjson-file, journald, syslog,\n\t\t\t\tsplunk, and awsfirelens.

\n

For more information about using the awslogs log driver, see Send\n\t\t\t\tAmazon ECS logs to CloudWatch in the Amazon Elastic Container Service Developer Guide.

\n

For more information about using the awsfirelens log driver, see Send\n\t\t\t\tAmazon ECS logs to an Amazon Web Services service or Amazon Web Services Partner.

\n \n

If you have a custom driver that isn't listed, you can fork the Amazon ECS container\n\t\t\t\tagent project that's available\n\t\t\t\t\ton GitHub and customize it to work with that driver. We encourage you to\n\t\t\t\tsubmit pull requests for changes that you would like to have included. However, we\n\t\t\t\tdon't currently provide support for running modified copies of this software.

\n
", "smithy.api#required": {} } }, @@ -7768,7 +7820,7 @@ } }, "traits": { - "smithy.api#documentation": "

The log configuration for the container. This parameter maps to LogConfig\n\t\t\tin the Create a container section of the Docker Remote API and the\n\t\t\t\t--log-driver option to \n docker\n\t\t\t\t\trun\n .

\n

By default, containers use the same logging driver that the Docker daemon uses.\n\t\t\tHowever, the container might use a different logging driver than the Docker daemon by\n\t\t\tspecifying a log driver configuration in the container definition. For more information\n\t\t\tabout the options for different supported log drivers, see Configure logging\n\t\t\t\tdrivers in the Docker documentation.

\n

Understand the following when specifying a log configuration for your\n\t\t\tcontainers.

\n
    \n
  • \n

    Amazon ECS currently supports a subset of the logging drivers available to the\n\t\t\t\t\tDocker daemon. Additional log drivers may be available in future releases of the\n\t\t\t\t\tAmazon ECS container agent.

    \n

    For tasks on Fargate, the supported log drivers are awslogs,\n\t\t\t\t\t\tsplunk, and awsfirelens.

    \n

    For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\t\t\tawslogs, fluentd, gelf,\n\t\t\t\t\t\tjson-file, journald,\n\t\t\t\t\t\tlogentries,syslog, splunk, and\n\t\t\t\t\t\tawsfirelens.

    \n
  • \n
  • \n

    This parameter requires version 1.18 of the Docker Remote API or greater on\n\t\t\t\t\tyour container instance.

    \n
  • \n
  • \n

    For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must\n\t\t\t\t\tregister the available logging drivers with the\n\t\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS environment variable before\n\t\t\t\t\tcontainers placed on that instance can use these log configuration options. For\n\t\t\t\t\tmore information, see Amazon ECS container agent configuration in the\n\t\t\t\t\tAmazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    For tasks that are on Fargate, because you don't have access to the\n\t\t\t\t\tunderlying infrastructure your tasks are hosted on, any additional software\n\t\t\t\t\tneeded must be installed outside of the task. For example, the Fluentd output\n\t\t\t\t\taggregators or a remote host running Logstash to send Gelf logs to.

    \n
  • \n
" + "smithy.api#documentation": "

The log configuration for the container. This parameter maps to LogConfig\n\t\t\tin the docker create-container command and the\n\t\t\t\t--log-driver option to docker\n\t\t\t\t\trun.

\n

By default, containers use the same logging driver that the Docker daemon uses.\n\t\t\tHowever, the container might use a different logging driver than the Docker daemon by\n\t\t\tspecifying a log driver configuration in the container definition.

\n

Understand the following when specifying a log configuration for your\n\t\t\tcontainers.

\n
    \n
  • \n

    Amazon ECS currently supports a subset of the logging drivers available to the\n\t\t\t\t\tDocker daemon. Additional log drivers may be available in future releases of the\n\t\t\t\t\tAmazon ECS container agent.

    \n

    For tasks on Fargate, the supported log drivers are awslogs,\n\t\t\t\t\t\tsplunk, and awsfirelens.

    \n

    For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\t\t\tawslogs, fluentd, gelf,\n\t\t\t\t\t\tjson-file, journald, syslog,\n\t\t\t\t\t\tsplunk, and awsfirelens.

    \n
  • \n
  • \n

    This parameter requires version 1.18 of the Docker Remote API or greater on\n\t\t\t\t\tyour container instance.

    \n
  • \n
  • \n

    For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must\n\t\t\t\t\tregister the available logging drivers with the\n\t\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS environment variable before\n\t\t\t\t\tcontainers placed on that instance can use these log configuration options. For\n\t\t\t\t\tmore information, see Amazon ECS container agent configuration in the\n\t\t\t\t\tAmazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    For tasks that are on Fargate, because you don't have access to the\n\t\t\t\t\tunderlying infrastructure your tasks are hosted on, any additional software\n\t\t\t\t\tneeded must be installed outside of the task. For example, the Fluentd output\n\t\t\t\t\taggregators or a remote host running Logstash to send Gelf logs to.

    \n
  • \n
" } }, "com.amazonaws.ecs#LogConfigurationOptionsMap": { @@ -8539,7 +8591,7 @@ } }, "traits": { - "smithy.api#documentation": "

Port mappings allow containers to access ports on the host container instance to send\n\t\t\tor receive traffic. Port mappings are specified as part of the container\n\t\t\tdefinition.

\n

If you use containers in a task with the awsvpc or host\n\t\t\tnetwork mode, specify the exposed ports using containerPort. The\n\t\t\t\thostPort can be left blank or it must be the same value as the\n\t\t\t\tcontainerPort.

\n

Most fields of this parameter (containerPort, hostPort,\n\t\t\t\tprotocol) maps to PortBindings in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--publish option to \n docker\n\t\t\t\t\trun\n . If the network mode of a task definition is set to\n\t\t\t\thost, host ports must either be undefined or match the container port\n\t\t\tin the port mapping.

\n \n

You can't expose the same container port for multiple protocols. If you attempt\n\t\t\t\tthis, an error is returned.

\n
\n

After a task reaches the RUNNING status, manual and automatic host and\n\t\t\tcontainer port assignments are visible in the networkBindings section of\n\t\t\t\tDescribeTasks API responses.

" + "smithy.api#documentation": "

Port mappings allow containers to access ports on the host container instance to send\n\t\t\tor receive traffic. Port mappings are specified as part of the container\n\t\t\tdefinition.

\n

If you use containers in a task with the awsvpc or host\n\t\t\tnetwork mode, specify the exposed ports using containerPort. The\n\t\t\t\thostPort can be left blank or it must be the same value as the\n\t\t\t\tcontainerPort.

\n

Most fields of this parameter (containerPort, hostPort,\n\t\t\tprotocol) maps to PortBindings in the docker create-container command and the\n\t\t\t\t--publish option to docker\n\t\t\t\t\trun. If the network mode of a task definition is set to\n\t\t\t\thost, host ports must either be undefined or match the container port\n\t\t\tin the port mapping.

\n \n

You can't expose the same container port for multiple protocols. If you attempt\n\t\t\t\tthis, an error is returned.

\n
\n

After a task reaches the RUNNING status, manual and automatic host and\n\t\t\tcontainer port assignments are visible in the networkBindings section of\n\t\t\t\tDescribeTasks API responses.

" } }, "com.amazonaws.ecs#PortMappingList": { @@ -8680,31 +8732,31 @@ "smithy.api#documentation": "

Modifies an account setting. Account settings are set on a per-Region basis.

\n

If you change the root user account setting, the default settings are reset for users and\n\t\t\troles that do not have specified individual account settings. For more information, see\n\t\t\t\tAccount\n\t\t\t\tSettings in the Amazon Elastic Container Service Developer Guide.

", "smithy.api#examples": [ { - "title": "To modify your account settings", - "documentation": "This example modifies your account settings to opt in to the new ARN and resource ID format for Amazon ECS services. If you’re using this command as the root user, then changes apply to the entire AWS account, unless an IAM user or role explicitly overrides these settings for themselves.", + "title": "To modify the account settings for a specific IAM user or IAM role", + "documentation": "This example modifies the account setting for a specific IAM user or IAM role to opt in to the new ARN and resource ID format for Amazon ECS container instances. If you’re using this command as the root user, then changes apply to the entire AWS account, unless an IAM user or role explicitly overrides these settings for themselves.", "input": { - "name": "serviceLongArnFormat", - "value": "enabled" + "name": "containerInstanceLongArnFormat", + "value": "enabled", + "principalArn": "arn:aws:iam:::user/principalName" }, "output": { "setting": { - "name": "serviceLongArnFormat", + "name": "containerInstanceLongArnFormat", "value": "enabled", "principalArn": "arn:aws:iam:::user/principalName" } } }, { - "title": "To modify the account settings for a specific IAM user or IAM role", - "documentation": "This example modifies the account setting for a specific IAM user or IAM role to opt in to the new ARN and resource ID format for Amazon ECS container instances. If you’re using this command as the root user, then changes apply to the entire AWS account, unless an IAM user or role explicitly overrides these settings for themselves.", + "title": "To modify your account settings", + "documentation": "This example modifies your account settings to opt in to the new ARN and resource ID format for Amazon ECS services. 
If you’re using this command as the root user, then changes apply to the entire AWS account, unless an IAM user or role explicitly overrides these settings for themselves.", "input": { - "name": "containerInstanceLongArnFormat", - "value": "enabled", - "principalArn": "arn:aws:iam:::user/principalName" + "name": "serviceLongArnFormat", + "value": "enabled" }, "output": { "setting": { - "name": "containerInstanceLongArnFormat", + "name": "serviceLongArnFormat", "value": "enabled", "principalArn": "arn:aws:iam:::user/principalName" } @@ -9086,7 +9138,7 @@ } ], "traits": { - "smithy.api#documentation": "

Registers a new task definition from the supplied family and\n\t\t\t\tcontainerDefinitions. Optionally, you can add data volumes to your\n\t\t\tcontainers with the volumes parameter. For more information about task\n\t\t\tdefinition parameters and defaults, see Amazon ECS Task\n\t\t\t\tDefinitions in the Amazon Elastic Container Service Developer Guide.

\n

You can specify a role for your task with the taskRoleArn parameter. When\n\t\t\tyou specify a role for a task, its containers can then use the latest versions of the\n\t\t\tCLI or SDKs to make API requests to the Amazon Web Services services that are specified in the\n\t\t\tpolicy that's associated with the role. For more information, see IAM\n\t\t\t\tRoles for Tasks in the Amazon Elastic Container Service Developer Guide.

\n

You can specify a Docker networking mode for the containers in your task definition\n\t\t\twith the networkMode parameter. The available network modes correspond to\n\t\t\tthose described in Network\n\t\t\t\tsettings in the Docker run reference. If you specify the awsvpc\n\t\t\tnetwork mode, the task is allocated an elastic network interface, and you must specify a\n\t\t\t\tNetworkConfiguration when you create a service or run a task with\n\t\t\tthe task definition. For more information, see Task Networking\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

", + "smithy.api#documentation": "

Registers a new task definition from the supplied family and\n\t\t\t\tcontainerDefinitions. Optionally, you can add data volumes to your\n\t\t\tcontainers with the volumes parameter. For more information about task\n\t\t\tdefinition parameters and defaults, see Amazon ECS Task\n\t\t\t\tDefinitions in the Amazon Elastic Container Service Developer Guide.

\n

You can specify a role for your task with the taskRoleArn parameter. When\n\t\t\tyou specify a role for a task, its containers can then use the latest versions of the\n\t\t\tCLI or SDKs to make API requests to the Amazon Web Services services that are specified in the\n\t\t\tpolicy that's associated with the role. For more information, see IAM\n\t\t\t\tRoles for Tasks in the Amazon Elastic Container Service Developer Guide.

\n

You can specify a Docker networking mode for the containers in your task definition\n\t\t\twith the networkMode parameter. If you specify the awsvpc\n\t\t\tnetwork mode, the task is allocated an elastic network interface, and you must specify a\n\t\t\t\tNetworkConfiguration when you create a service or run a task with\n\t\t\tthe task definition. For more information, see Task Networking\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

", "smithy.api#examples": [ { "title": "To register a task definition", @@ -9157,13 +9209,13 @@ "executionRoleArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent\n permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required\n depending on the requirements of your task. For more information, see Amazon ECS task\n execution IAM role in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent\n permission to make Amazon Web Services API calls on your behalf. For information about the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" } }, "networkMode": { "target": "com.amazonaws.ecs#NetworkMode", "traits": { - "smithy.api#documentation": "

The Docker networking mode to use for the containers in the task. The valid values are\n none, bridge, awsvpc, and host.\n If no network mode is specified, the default is bridge.

\n

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network\n mode is set to none, you cannot specify port mappings in your container\n definitions, and the tasks containers do not have external connectivity. The\n host and awsvpc network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge mode.

\n

With the host and awsvpc network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host\n network mode) or the attached elastic network interface port (for the\n awsvpc network mode), so you cannot take advantage of dynamic host port\n mappings.

\n \n

When using the host network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.

\n
\n

If the network mode is awsvpc, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.

\n

If the network mode is host, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.

\n

For more information, see Network\n settings in the Docker run reference.

" + "smithy.api#documentation": "

The Docker networking mode to use for the containers in the task. The valid values are\n none, bridge, awsvpc, and host.\n If no network mode is specified, the default is bridge.

\n

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network\n mode is set to none, you cannot specify port mappings in your container\n definitions, and the tasks containers do not have external connectivity. The\n host and awsvpc network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge mode.

\n

With the host and awsvpc network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host\n network mode) or the attached elastic network interface port (for the\n awsvpc network mode), so you cannot take advantage of dynamic host port\n mappings.

\n \n

When using the host network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.

\n
\n

If the network mode is awsvpc, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.

\n

If the network mode is host, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.

" } }, "containerDefinitions": { @@ -9212,13 +9264,13 @@ "pidMode": { "target": "com.amazonaws.ecs#PidMode", "traits": { - "smithy.api#documentation": "

The process namespace to use for the containers in the task. The valid\n values are host or task. On Fargate for\n Linux containers, the only valid value is task. For\n example, monitoring sidecars might need pidMode to access\n information about other containers running in the same task.

\n

If host is specified, all containers within the tasks\n that specified the host PID mode on the same container\n instance share the same process namespace with the host Amazon EC2\n instance.

\n

If task is specified, all containers within the specified\n task share the same process namespace.

\n

If no value is specified, the\n default is a private namespace for each container. For more information,\n see PID settings in the Docker run\n reference.

\n

If the host PID mode is used, there's a heightened risk\n of undesired process namespace exposure. For more information, see\n Docker security.

\n \n

This parameter is not supported for Windows containers.

\n
\n \n

This parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0 or later\n (Linux). This isn't supported for Windows containers on\n Fargate.

\n
" + "smithy.api#documentation": "

The process namespace to use for the containers in the task. The valid\n values are host or task. On Fargate for\n Linux containers, the only valid value is task. For\n example, monitoring sidecars might need pidMode to access\n information about other containers running in the same task.

\n

If host is specified, all containers within the tasks\n that specified the host PID mode on the same container\n instance share the same process namespace with the host Amazon EC2\n instance.

\n

If task is specified, all containers within the specified\n task share the same process namespace.

\n

If no value is specified, the\n default is a private namespace for each container.

\n

If the host PID mode is used, there's a heightened risk\n of undesired process namespace exposure.

\n \n

This parameter is not supported for Windows containers.

\n
\n \n

This parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0 or later\n (Linux). This isn't supported for Windows containers on\n Fargate.

\n
" } }, "ipcMode": { "target": "com.amazonaws.ecs#IpcMode", "traits": { - "smithy.api#documentation": "

The IPC resource namespace to use for the containers in the task. The valid values are\n host, task, or none. If host is\n specified, then all containers within the tasks that specified the host IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task is specified, all containers within the specified task\n share the same IPC resources. If none is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance. For\n more information, see IPC\n settings in the Docker run reference.

\n

If the host IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace expose. For more information, see Docker\n security.

\n

If you are setting namespaced kernel parameters using systemControls for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.

\n
    \n
  • \n

    For tasks that use the host IPC mode, IPC namespace related\n systemControls are not supported.

    \n
  • \n
  • \n

    For tasks that use the task IPC mode, IPC namespace related\n systemControls will apply to all containers within a\n task.

    \n
  • \n
\n \n

This parameter is not supported for Windows containers or tasks run on Fargate.

\n
" + "smithy.api#documentation": "

The IPC resource namespace to use for the containers in the task. The valid values are\n host, task, or none. If host is\n specified, then all containers within the tasks that specified the host IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task is specified, all containers within the specified task\n share the same IPC resources. If none is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance.

\n

If the host IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace expose.

\n

If you are setting namespaced kernel parameters using systemControls for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.

\n
    \n
  • \n

    For tasks that use the host IPC mode, IPC namespace related\n systemControls are not supported.

    \n
  • \n
  • \n

    For tasks that use the task IPC mode, IPC namespace related\n systemControls will apply to all containers within a\n task.

    \n
  • \n
\n \n

This parameter is not supported for Windows containers or tasks run on Fargate.

\n
" } }, "proxyConfiguration": { @@ -9380,7 +9432,7 @@ "value": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The value for the specified resource type.

\n

When the type is GPU, the value is the number of physical GPUs the\n\t\t\tAmazon ECS container agent reserves for the container. The number of GPUs that's reserved for\n\t\t\tall containers in a task can't exceed the number of available GPUs on the container\n\t\t\tinstance that the task is launched on.

\n

When the type is InferenceAccelerator, the value matches\n\t\t\tthe deviceName for an InferenceAccelerator specified in a task definition.

", + "smithy.api#documentation": "

The value for the specified resource type.

\n

When the type is GPU, the value is the number of physical\n\t\t\t\tGPUs the Amazon ECS container agent reserves for the container. The number\n\t\t\tof GPUs that's reserved for all containers in a task can't exceed the number of\n\t\t\tavailable GPUs on the container instance that the task is launched on.

\n

When the type is InferenceAccelerator, the value matches the\n\t\t\t\tdeviceName for an InferenceAccelerator specified in a task definition.

", "smithy.api#required": {} } }, @@ -9597,7 +9649,7 @@ "startedBy": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call\n\t\t\twith the startedBy value. Up to 128 letters (uppercase and lowercase),\n\t\t\tnumbers, hyphens (-), and underscores (_) are allowed.

\n

If a task is started by an Amazon ECS service, then the startedBy parameter\n\t\t\tcontains the deployment ID of the service that starts it.

" + "smithy.api#documentation": "

An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call with\n\t\t\tthe startedBy value. Up to 128 letters (uppercase and lowercase), numbers,\n\t\t\thyphens (-), forward slash (/), and underscores (_) are allowed.

\n

If a task is started by an Amazon ECS service, then the startedBy parameter\n\t\t\tcontains the deployment ID of the service that starts it.

" } }, "tags": { @@ -9609,7 +9661,7 @@ "taskDefinition": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The family and revision (family:revision) or\n\t\t\tfull ARN of the task definition to run. If a revision isn't specified,\n\t\t\tthe latest ACTIVE revision is used.

\n

The full ARN value must match the value that you specified as the\n\t\t\t\tResource of the principal's permissions policy.

\n

When you specify a task definition, you must either specify a specific revision, or\n\t\t\tall revisions in the ARN.

\n

To specify a specific revision, include the revision number in the ARN. For example,\n\t\t\tto specify revision 2, use\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2.

\n

To specify all revisions, use the wildcard (*) in the ARN. For example, to specify all\n\t\t\trevisions, use\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*.

\n

For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

", + "smithy.api#documentation": "

The family and revision (family:revision) or\n\t\t\tfull ARN of the task definition to run. If a revision isn't specified,\n\t\t\tthe latest ACTIVE revision is used.

\n

The full ARN value must match the value that you specified as the\n\t\t\t\tResource of the principal's permissions policy.

\n

When you specify a task definition, you must either specify a specific revision, or\n\t\t\tall revisions in the ARN.

\n

To specify a specific revision, include the revision number in the ARN. For example,\n\t\t\tto specify revision 2, use\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2.

\n

To specify all revisions, use the wildcard (*) in the ARN. For example, to specify\n\t\t\tall revisions, use\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*.

\n

For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

", "smithy.api#required": {} } }, @@ -9637,7 +9689,7 @@ "tasks": { "target": "com.amazonaws.ecs#Tasks", "traits": { - "smithy.api#documentation": "

A full description of the tasks that were run. The tasks that were successfully placed\n\t\t\ton your cluster are described here.

\n

" + "smithy.api#documentation": "

A full description of the tasks that were run. The tasks that were successfully placed\n\t\t\ton your cluster are described here.

" } }, "failures": { @@ -10646,7 +10698,7 @@ "startedBy": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call\n\t\t\twith the startedBy value. Up to 36 letters (uppercase and lowercase),\n\t\t\tnumbers, hyphens (-), and underscores (_) are allowed.

\n

If a task is started by an Amazon ECS service, the startedBy parameter\n\t\t\tcontains the deployment ID of the service that starts it.

" + "smithy.api#documentation": "

An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call with\n\t\t\tthe startedBy value. Up to 36 letters (uppercase and lowercase), numbers,\n\t\t\thyphens (-), forward slash (/), and underscores (_) are allowed.

\n

If a task is started by an Amazon ECS service, the startedBy parameter\n\t\t\tcontains the deployment ID of the service that starts it.

" } }, "tags": { @@ -10722,7 +10774,7 @@ } ], "traits": { - "smithy.api#documentation": "

Stops a running task. Any tags associated with the task will be deleted.

\n

When StopTask is called on a task, the equivalent of docker\n\t\t\t\tstop is issued to the containers running in the task. This results in a\n\t\t\t\tSIGTERM value and a default 30-second timeout, after which the\n\t\t\t\tSIGKILL value is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL value is sent.

\n

For Windows containers, POSIX signals do not work and runtime stops the container by sending\n\t\t\ta CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown\n\t\t\t\tof (Windows) container #25982 on GitHub.

\n \n

The default 30-second timeout can be configured on the Amazon ECS container agent with\n\t\t\t\tthe ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see\n\t\t\t\t\tAmazon ECS Container Agent Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

\n
" + "smithy.api#documentation": "

Stops a running task. Any tags associated with the task will be deleted.

\n

When StopTask is called on a task, the equivalent of docker\n\t\t\t\tstop is issued to the containers running in the task. This results in a\n\t\t\t\tSIGTERM value and a default 30-second timeout, after which the\n\t\t\t\tSIGKILL value is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL value is sent.

\n

For Windows containers, POSIX signals do not work and runtime stops the container by\n\t\t\tsending a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown\n\t\t\t\tof (Windows) container #25982 on GitHub.

\n \n

The default 30-second timeout can be configured on the Amazon ECS container agent with\n\t\t\t\tthe ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see\n\t\t\t\t\tAmazon ECS Container Agent Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

\n
" } }, "com.amazonaws.ecs#StopTaskRequest": { @@ -11063,7 +11115,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls in the Create a container section of the\n\t\t\tDocker Remote API and the --sysctl option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time setting to maintain longer lived\n\t\t\tconnections.

\n

We don't recommend that you specify network-related systemControls\n\t\t\tparameters for multiple containers in a single task that also uses either the\n\t\t\t\tawsvpc or host network mode. Doing this has the following\n\t\t\tdisadvantages:

\n
    \n
  • \n

    For tasks that use the awsvpc network mode including Fargate,\n\t\t\t\t\tif you set systemControls for any container, it applies to all\n\t\t\t\t\tcontainers in the task. If you set different systemControls for\n\t\t\t\t\tmultiple containers in a single task, the container that's started last\n\t\t\t\t\tdetermines which systemControls take effect.

    \n
  • \n
  • \n

    For tasks that use the host network mode, the network namespace\n\t\t\t\t\t\tsystemControls aren't supported.

    \n
  • \n
\n

If you're setting an IPC resource namespace to use for the containers in the task, the\n\t\t\tfollowing conditions apply to your system controls. For more information, see IPC mode.

\n
    \n
  • \n

    For tasks that use the host IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls aren't supported.

    \n
  • \n
  • \n

    For tasks that use the task IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls values apply to all containers within a\n\t\t\t\t\ttask.

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
\n \n

This parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0 or later\n (Linux). This isn't supported for Windows containers on\n Fargate.

\n
" + "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\tSysctls in the docker create-container command and the --sysctl option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time setting to maintain longer lived\n\t\t\tconnections.

\n

We don't recommend that you specify network-related systemControls\n\t\t\tparameters for multiple containers in a single task that also uses either the\n\t\t\t\tawsvpc or host network mode. Doing this has the following\n\t\t\tdisadvantages:

\n
    \n
  • \n

    For tasks that use the awsvpc network mode including Fargate,\n\t\t\t\t\tif you set systemControls for any container, it applies to all\n\t\t\t\t\tcontainers in the task. If you set different systemControls for\n\t\t\t\t\tmultiple containers in a single task, the container that's started last\n\t\t\t\t\tdetermines which systemControls take effect.

    \n
  • \n
  • \n

    For tasks that use the host network mode, the network namespace\n\t\t\t\t\t\tsystemControls aren't supported.

    \n
  • \n
\n

If you're setting an IPC resource namespace to use for the containers in the task, the\n\t\t\tfollowing conditions apply to your system controls. For more information, see IPC mode.

\n
    \n
  • \n

    For tasks that use the host IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls aren't supported.

    \n
  • \n
  • \n

    For tasks that use the task IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls values apply to all containers within a\n\t\t\t\t\ttask.

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
\n \n

This parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0 or later\n (Linux). This isn't supported for Windows containers on\n Fargate.

\n
" } }, "com.amazonaws.ecs#SystemControls": { @@ -11230,7 +11282,7 @@ } }, "traits": { - "smithy.api#documentation": "

The specified target wasn't found. You can view your available container instances\n\t\t\twith ListContainerInstances. Amazon ECS container instances are\n\t\t\tcluster-specific and Region-specific.

", + "smithy.api#documentation": "

The specified target wasn't found. You can view your available container instances\n\t\t\twith ListContainerInstances. Amazon ECS container instances are cluster-specific and\n\t\t\tRegion-specific.

", "smithy.api#error": "client" } }, @@ -11501,19 +11553,19 @@ "taskRoleArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the\n\t\t\ttask permission to call Amazon Web Services APIs on your behalf. For more information, see Amazon ECS\n\t\t\t\tTask Role in the Amazon Elastic Container Service Developer Guide.

\n

IAM roles for tasks on Windows require that the -EnableTaskIAMRole\n\t\t\toption is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some\n\t\t\tconfiguration code to use the feature. For more information, see Windows IAM roles\n\t\t\t\tfor tasks in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the\n\t\t\ttask permission to call Amazon Web Services APIs on your behalf. For information about the required\n\t\t\tIAM roles for Amazon ECS, see IAM\n\t\t\t\troles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" } }, "executionRoleArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent\n permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required\n depending on the requirements of your task. For more information, see Amazon ECS task\n execution IAM role in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent\n permission to make Amazon Web Services API calls on your behalf. For information about the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" } }, "networkMode": { "target": "com.amazonaws.ecs#NetworkMode", "traits": { - "smithy.api#documentation": "

The Docker networking mode to use for the containers in the task. The valid values are\n none, bridge, awsvpc, and host.\n If no network mode is specified, the default is bridge.

\n

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network\n mode is set to none, you cannot specify port mappings in your container\n definitions, and the tasks containers do not have external connectivity. The\n host and awsvpc network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge mode.

\n

With the host and awsvpc network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host\n network mode) or the attached elastic network interface port (for the\n awsvpc network mode), so you cannot take advantage of dynamic host port\n mappings.

\n \n

When using the host network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.

\n
\n

If the network mode is awsvpc, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.

\n

If the network mode is host, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.

\n

For more information, see Network\n settings in the Docker run reference.

" + "smithy.api#documentation": "

The Docker networking mode to use for the containers in the task. The valid values are\n none, bridge, awsvpc, and host.\n If no network mode is specified, the default is bridge.

\n

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network\n mode is set to none, you cannot specify port mappings in your container\n definitions, and the tasks containers do not have external connectivity. The\n host and awsvpc network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge mode.

\n

With the host and awsvpc network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host\n network mode) or the attached elastic network interface port (for the\n awsvpc network mode), so you cannot take advantage of dynamic host port\n mappings.

\n \n

When using the host network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.

\n
\n

If the network mode is awsvpc, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.

\n

If the network mode is host, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.

" } }, "revision": { @@ -11568,7 +11620,7 @@ "cpu": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The number of cpu units used by the task. If you use the EC2 launch type,\n\t\t\tthis field is optional. Any value can be used. If you use the Fargate launch type, this\n\t\t\tfield is required. You must use one of the following values. The value that you choose\n\t\t\tdetermines your range of valid values for the memory parameter.

\n

The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.

\n
    \n
  • \n

    256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

    \n
  • \n
  • \n

    512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

    \n
  • \n
  • \n

    1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

    \n
  • \n
  • \n

    2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The number of cpu units used by the task. If you use the EC2 launch type,\n\t\t\tthis field is optional. Any value can be used. If you use the Fargate launch type, this\n\t\t\tfield is required. You must use one of the following values. The value that you choose\n\t\t\tdetermines your range of valid values for the memory parameter.

\n

If you use the EC2 launch type, this field is optional. Supported values\n\t\t\tare between 128 CPU units (0.125 vCPUs) and 10240\n\t\t\tCPU units (10 vCPUs).

\n

The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.

\n
    \n
  • \n

    256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

    \n
  • \n
  • \n

    512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

    \n
  • \n
  • \n

    1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

    \n
  • \n
  • \n

    2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    16384 (16 vCPU) - Available memory values: 32 GB and 120 GB in 8 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" } }, "memory": { @@ -11586,13 +11638,13 @@ "pidMode": { "target": "com.amazonaws.ecs#PidMode", "traits": { - "smithy.api#documentation": "

The process namespace to use for the containers in the task. The valid\n values are host or task. On Fargate for\n Linux containers, the only valid value is task. For\n example, monitoring sidecars might need pidMode to access\n information about other containers running in the same task.

\n

If host is specified, all containers within the tasks\n that specified the host PID mode on the same container\n instance share the same process namespace with the host Amazon EC2\n instance.

\n

If task is specified, all containers within the specified\n task share the same process namespace.

\n

If no value is specified, the\n default is a private namespace for each container. For more information,\n see PID settings in the Docker run\n reference.

\n

If the host PID mode is used, there's a heightened risk\n of undesired process namespace exposure. For more information, see\n Docker security.

\n \n

This parameter is not supported for Windows containers.

\n
\n \n

This parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0 or later\n (Linux). This isn't supported for Windows containers on\n Fargate.

\n
" + "smithy.api#documentation": "

The process namespace to use for the containers in the task. The valid\n values are host or task. On Fargate for\n Linux containers, the only valid value is task. For\n example, monitoring sidecars might need pidMode to access\n information about other containers running in the same task.

\n

If host is specified, all containers within the tasks\n that specified the host PID mode on the same container\n instance share the same process namespace with the host Amazon EC2\n instance.

\n

If task is specified, all containers within the specified\n task share the same process namespace.

\n

If no value is specified, the\n default is a private namespace for each container.

\n

If the host PID mode is used, there's a heightened risk\n of undesired process namespace exposure.

\n \n

This parameter is not supported for Windows containers.

\n
\n \n

This parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0 or later\n (Linux). This isn't supported for Windows containers on\n Fargate.

\n
" } }, "ipcMode": { "target": "com.amazonaws.ecs#IpcMode", "traits": { - "smithy.api#documentation": "

The IPC resource namespace to use for the containers in the task. The valid values are\n host, task, or none. If host is\n specified, then all containers within the tasks that specified the host IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task is specified, all containers within the specified task\n share the same IPC resources. If none is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance. For\n more information, see IPC\n settings in the Docker run reference.

\n

If the host IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace expose. For more information, see Docker\n security.

\n

If you are setting namespaced kernel parameters using systemControls for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.

\n
    \n
  • \n

    For tasks that use the host IPC mode, IPC namespace related\n systemControls are not supported.

    \n
  • \n
  • \n

    For tasks that use the task IPC mode, IPC namespace related\n systemControls will apply to all containers within a\n task.

    \n
  • \n
\n \n

This parameter is not supported for Windows containers or tasks run on Fargate.

\n
" + "smithy.api#documentation": "

The IPC resource namespace to use for the containers in the task. The valid values are\n host, task, or none. If host is\n specified, then all containers within the tasks that specified the host IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task is specified, all containers within the specified task\n share the same IPC resources. If none is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance.

\n

If the host IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace exposure.

\n

If you are setting namespaced kernel parameters using systemControls for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.

\n
    \n
  • \n

    For tasks that use the host IPC mode, IPC namespace related\n systemControls are not supported.

    \n
  • \n
  • \n

    For tasks that use the task IPC mode, IPC namespace related\n systemControls will apply to all containers within a\n task.

    \n
  • \n
\n \n

This parameter is not supported for Windows containers or tasks run on Fargate.

\n
" } }, "proxyConfiguration": { @@ -11743,13 +11795,13 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The total amount, in GiB, of the ephemeral storage to set for the task. The minimum \t\t\n\t\t\tsupported value is 20 GiB and the maximum supported value is\u2028 200 \n\t\t\tGiB.

" + "smithy.api#documentation": "

The total amount, in GiB, of the ephemeral storage to set for the task. The minimum\n\t\t\tsupported value is 20 GiB and the maximum supported value is\n\t\t\t\t200 GiB.

" } }, "kmsKeyId": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

Specify an Key Management Service key ID to encrypt the ephemeral storage for the task.

" + "smithy.api#documentation": "

Specify a Key Management Service key ID to encrypt the ephemeral storage for the\n\t\t\ttask.

" } } }, @@ -12313,7 +12365,7 @@ } }, "traits": { - "smithy.api#documentation": "

The ulimit settings to pass to the container.

\n

Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile soft limit is 1024 and the default hard limit\n\t\t\t\t\t\t\tis 65535.

\n

You can specify the ulimit settings for a container in a task\n\t\t\tdefinition.

" + "smithy.api#documentation": "

The ulimit settings to pass to the container.

\n

Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile soft limit is 65535 and the default hard limit\n\t\t\t\t\t\t\tis 65535.

\n

You can specify the ulimit settings for a container in a task\n\t\t\tdefinition.

" } }, "com.amazonaws.ecs#UlimitList": { @@ -12793,7 +12845,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies the status of an Amazon ECS container instance.

\n

Once a container instance has reached an ACTIVE state, you can change the\n\t\t\tstatus of a container instance to DRAINING to manually remove an instance\n\t\t\tfrom a cluster, for example to perform system updates, update the Docker daemon, or\n\t\t\tscale down the cluster size.

\n \n

A container instance can't be changed to DRAINING until it has\n\t\t\t\treached an ACTIVE status. If the instance is in any other status, an\n\t\t\t\terror will be received.

\n
\n

When you set a container instance to DRAINING, Amazon ECS prevents new tasks\n\t\t\tfrom being scheduled for placement on the container instance and replacement service\n\t\t\ttasks are started on other container instances in the cluster if the resources are\n\t\t\tavailable. Service tasks on the container instance that are in the PENDING\n\t\t\tstate are stopped immediately.

\n

Service tasks on the container instance that are in the RUNNING state are\n\t\t\tstopped and replaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent and maximumPercent. You can change\n\t\t\tthe deployment configuration of your service using UpdateService.

\n
    \n
  • \n

    If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during task replacement. For example,\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. If the\n\t\t\t\t\tminimum is 100%, the service scheduler can't remove existing tasks until the\n\t\t\t\t\treplacement tasks are considered healthy. Tasks for services that do not use a\n\t\t\t\t\tload balancer are considered healthy if they're in the RUNNING\n\t\t\t\t\tstate. Tasks for services that use a load balancer are considered healthy if\n\t\t\t\t\tthey're in the RUNNING state and are reported as healthy by the\n\t\t\t\t\tload balancer.

    \n
  • \n
  • \n

    The maximumPercent parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during task replacement. You can use this to define the\n\t\t\t\t\treplacement batch size. For example, if desiredCount is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four tasks to be\n\t\t\t\t\tdrained, provided that the cluster resources required to do this are available.\n\t\t\t\t\tIf the maximum is 100%, then replacement tasks can't start until the draining\n\t\t\t\t\ttasks have stopped.

    \n
  • \n
\n

Any PENDING or RUNNING tasks that do not belong to a service\n\t\t\taren't affected. You must wait for them to finish or stop them manually.

\n

A container instance has completed draining when it has no more RUNNING\n\t\t\ttasks. You can verify this using ListTasks.

\n

When a container instance has been drained, you can set a container instance to\n\t\t\t\tACTIVE status and once it has reached that status the Amazon ECS scheduler\n\t\t\tcan begin scheduling tasks on the instance again.

" + "smithy.api#documentation": "

Modifies the status of an Amazon ECS container instance.

\n

Once a container instance has reached an ACTIVE state, you can change the\n\t\t\tstatus of a container instance to DRAINING to manually remove an instance\n\t\t\tfrom a cluster, for example to perform system updates, update the Docker daemon, or\n\t\t\tscale down the cluster size.

\n \n

A container instance can't be changed to DRAINING until it has\n\t\t\t\treached an ACTIVE status. If the instance is in any other status, an\n\t\t\t\terror will be received.

\n
\n

When you set a container instance to DRAINING, Amazon ECS prevents new tasks\n\t\t\tfrom being scheduled for placement on the container instance and replacement service\n\t\t\ttasks are started on other container instances in the cluster if the resources are\n\t\t\tavailable. Service tasks on the container instance that are in the PENDING\n\t\t\tstate are stopped immediately.

\n

Service tasks on the container instance that are in the RUNNING state are\n\t\t\tstopped and replaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent and maximumPercent. You can change\n\t\t\tthe deployment configuration of your service using UpdateService.

\n
    \n
  • \n

    If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during task replacement. For example,\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. If the\n\t\t\t\t\tminimum is 100%, the service scheduler can't remove existing tasks until the\n\t\t\t\t\treplacement tasks are considered healthy. Tasks for services that do not use a\n\t\t\t\t\tload balancer are considered healthy if they're in the RUNNING\n\t\t\t\t\tstate. Tasks for services that use a load balancer are considered healthy if\n\t\t\t\t\tthey're in the RUNNING state and are reported as healthy by the\n\t\t\t\t\tload balancer.

    \n
  • \n
  • \n

    The maximumPercent parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during task replacement. You can use this to define the\n\t\t\t\t\treplacement batch size. For example, if desiredCount is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four tasks to be\n\t\t\t\t\tdrained, provided that the cluster resources required to do this are available.\n\t\t\t\t\tIf the maximum is 100%, then replacement tasks can't start until the draining\n\t\t\t\t\ttasks have stopped.

    \n
  • \n
\n

Any PENDING or RUNNING tasks that do not belong to a service\n\t\t\taren't affected. You must wait for them to finish or stop them manually.

\n

A container instance has completed draining when it has no more RUNNING\n\t\t\ttasks. You can verify this using ListTasks.

\n

When a container instance has been drained, you can set a container instance to\n\t\t\t\tACTIVE status and once it has reached that status the Amazon ECS scheduler\n\t\t\tcan begin scheduling tasks on the instance again.

" } }, "com.amazonaws.ecs#UpdateContainerInstancesStateRequest": { @@ -12906,20 +12958,20 @@ "smithy.api#documentation": "

Modifies the parameters of a service.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

For services using the rolling update (ECS) you can update the desired\n\t\t\tcount, deployment configuration, network configuration, load balancers, service\n\t\t\tregistries, enable ECS managed tags option, propagate tags option, task placement\n\t\t\tconstraints and strategies, and task definition. When you update any of these\n\t\t\tparameters, Amazon ECS starts new tasks with the new configuration.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or\n\t\t\trunning a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update\n\t\t\tyour volume configurations and trigger a new deployment.\n\t\t\t\tvolumeConfigurations is only supported for REPLICA service and not\n\t\t\tDAEMON service. If you leave volumeConfigurations\n null, it doesn't trigger a new deployment. For more information on volumes,\n\t\t\tsee Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

For services using the blue/green (CODE_DEPLOY) deployment controller,\n\t\t\tonly the desired count, deployment configuration, health check grace period, task\n\t\t\tplacement constraints and strategies, enable ECS managed tags option, and propagate tags\n\t\t\tcan be updated using this API. If the network configuration, platform version, task\n\t\t\tdefinition, or load balancer need to be updated, create a new CodeDeploy deployment. For more\n\t\t\tinformation, see CreateDeployment in the CodeDeploy API Reference.

\n

For services using an external deployment controller, you can update only the desired\n\t\t\tcount, task placement constraints and strategies, health check grace period, enable ECS\n\t\t\tmanaged tags option, and propagate tags option, using this API. If the launch type, load\n\t\t\tbalancer, network configuration, platform version, or task definition need to be\n\t\t\tupdated, create a new task set For more information, see CreateTaskSet.

\n

You can add to or subtract from the number of instantiations of a task definition in a\n\t\t\tservice by specifying the cluster that the service is running in and a new\n\t\t\t\tdesiredCount parameter.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or\n\t\t\trunning a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

If you have updated the container image of your application, you can create a new task\n\t\t\tdefinition with that image and deploy it to your service. The service scheduler uses the\n\t\t\tminimum healthy percent and maximum percent parameters (in the service's deployment\n\t\t\tconfiguration) to determine the deployment strategy.

\n \n

If your updated Docker image uses the same tag as what is in the existing task\n\t\t\t\tdefinition for your service (for example, my_image:latest), you don't\n\t\t\t\tneed to create a new revision of your task definition. You can update the service\n\t\t\t\tusing the forceNewDeployment option. The new tasks launched by the\n\t\t\t\tdeployment pull the current image/tag combination from your repository when they\n\t\t\t\tstart.

\n
\n

You can also update the deployment configuration of a service. When a deployment is\n\t\t\ttriggered by updating the task definition of a service, the service scheduler uses the\n\t\t\tdeployment configuration parameters, minimumHealthyPercent and\n\t\t\t\tmaximumPercent, to determine the deployment strategy.

\n
    \n
  • \n

    If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. Tasks for\n\t\t\t\t\tservices that don't use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING state. Tasks for services that use a load balancer are\n\t\t\t\t\tconsidered healthy if they're in the RUNNING state and are reported\n\t\t\t\t\tas healthy by the load balancer.

    \n
  • \n
  • \n

    The maximumPercent parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during a deployment. You can use it to define the\n\t\t\t\t\tdeployment batch size. For example, if desiredCount is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four older tasks\n\t\t\t\t\t(provided that the cluster resources required to do this are available).

    \n
  • \n
\n

When UpdateService stops a task during a deployment, the equivalent\n\t\t\tof docker stop is issued to the containers running in the task. This\n\t\t\tresults in a SIGTERM and a 30-second timeout. After this,\n\t\t\t\tSIGKILL is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM gracefully and exits within 30 seconds from\n\t\t\treceiving it, no SIGKILL is sent.

\n

When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster with the following logic.

\n
    \n
  • \n

    Determine which of the container instances in your cluster can support your\n\t\t\t\t\tservice's task definition. For example, they have the required CPU, memory,\n\t\t\t\t\tports, and container instance attributes.

    \n
  • \n
  • \n

    By default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner even though you can choose a different\n\t\t\t\t\tplacement strategy.

    \n
      \n
    • \n

      Sort the valid container instances by the fewest number of running\n\t\t\t\t\t\t\ttasks for this service in the same Availability Zone as the instance.\n\t\t\t\t\t\t\tFor example, if zone A has one running service task and zones B and C\n\t\t\t\t\t\t\teach have zero, valid container instances in either zone B or C are\n\t\t\t\t\t\t\tconsidered optimal for placement.

      \n
    • \n
    • \n

      Place the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone (based on the previous steps), favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.

      \n
    • \n
    \n
  • \n
\n

When the service scheduler stops running tasks, it attempts to maintain balance across\n\t\t\tthe Availability Zones in your cluster using the following logic:

\n
    \n
  • \n

    Sort the container instances by the largest number of running tasks for this\n\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A\n\t\t\t\t\thas one running service task and zones B and C each have two, container\n\t\t\t\t\tinstances in either zone B or C are considered optimal for termination.

    \n
  • \n
  • \n

    Stop the task on a container instance in an optimal Availability Zone (based\n\t\t\t\t\ton the previous steps), favoring container instances with the largest number of\n\t\t\t\t\trunning tasks for this service.

    \n
  • \n
\n \n

You must have a service-linked role when you update any of the following service\n\t\t\t\tproperties:

\n
    \n
  • \n

    \n loadBalancers,

    \n
  • \n
  • \n

    \n serviceRegistries\n

    \n
  • \n
\n

For more information about the role see the CreateService request\n\t\t\t\tparameter \n role\n .

\n
", "smithy.api#examples": [ { - "title": "To change the task definition used in a service", - "documentation": "This example updates the my-http-service service to use the amazon-ecs-sample task definition.", + "title": "To change the number of tasks in a service", + "documentation": "This example updates the desired count of the my-http-service service to 10.", "input": { "service": "my-http-service", - "taskDefinition": "amazon-ecs-sample" + "desiredCount": 10 }, "output": {} }, { - "title": "To change the number of tasks in a service", - "documentation": "This example updates the desired count of the my-http-service service to 10.", + "title": "To change the task definition used in a service", + "documentation": "This example updates the my-http-service service to use the amazon-ecs-sample task definition.", "input": { "service": "my-http-service", - "desiredCount": 10 + "taskDefinition": "amazon-ecs-sample" }, "output": {} } @@ -13183,36 +13235,35 @@ "smithy.api#documentation": "

Updates the protection status of a task. You can set protectionEnabled to\n\t\t\t\ttrue to protect your task from termination during scale-in events from\n\t\t\t\tService\n\t\t\t\tAutoscaling or deployments.

\n

Task-protection, by default, expires after 2 hours at which point Amazon ECS clears the\n\t\t\t\tprotectionEnabled property making the task eligible for termination by\n\t\t\ta subsequent scale-in event.

\n

You can specify a custom expiration period for task protection from 1 minute to up to\n\t\t\t2,880 minutes (48 hours). To specify the custom expiration period, set the\n\t\t\t\texpiresInMinutes property. The expiresInMinutes property\n\t\t\tis always reset when you invoke this operation for a task that already has\n\t\t\t\tprotectionEnabled set to true. You can keep extending the\n\t\t\tprotection expiration period of a task by invoking this operation repeatedly.

\n

To learn more about Amazon ECS task protection, see Task scale-in\n\t\t\t\tprotection in the \n Amazon Elastic Container Service Developer Guide\n .

\n \n

This operation is only supported for tasks belonging to an Amazon ECS service. Invoking\n\t\t\t\tthis operation for a standalone task will result in an TASK_NOT_VALID\n\t\t\t\tfailure. For more information, see API failure\n\t\t\t\t\treasons.

\n
\n \n

If you prefer to set task protection from within the container, we recommend using\n\t\t\t\tthe Task scale-in protection endpoint.

\n
", "smithy.api#examples": [ { - "title": "To set task scale-in protection for a task for 60 minutes", - "documentation": "This example enables scale-in protection for a task for 60 minutes.", + "title": "To remove task scale-in protection", + "documentation": "This example removes scale-in protection for a task.", "input": { "cluster": "test-task-protection", "tasks": [ "b8b1cf532d0e46ba8d44a40d1de16772" ], - "protectionEnabled": true, - "expiresInMinutes": 60 + "protectionEnabled": false }, "output": { "protectedTasks": [ { "taskArn": "arn:aws:ecs:us-west-2:012345678910:task/default/b8b1cf532d0e46ba8d44a40d1de16772", - "protectionEnabled": true, - "expirationDate": "2022-11-02T06:56:32.553Z" + "protectionEnabled": false } ], "failures": [] } }, { - "title": "To set task scale-in protection for the default time period in minutes", - "documentation": "This example enables task scale-in protection for a task, without specifying the expiresInMinutes parameter, for the default protection period of 120 minutes.", + "title": "To set task scale-in protection for a task for 60 minutes", + "documentation": "This example enables scale-in protection for a task for 60 minutes.", "input": { "cluster": "test-task-protection", "tasks": [ "b8b1cf532d0e46ba8d44a40d1de16772" ], - "protectionEnabled": true + "protectionEnabled": true, + "expiresInMinutes": 60 }, "output": { "protectedTasks": [ @@ -13226,20 +13277,21 @@ } }, { - "title": "To remove task scale-in protection", - "documentation": "This example removes scale-in protection for a task.", + "title": "To set task scale-in protection for the default time period in minutes", + "documentation": "This example enables task scale-in protection for a task, without specifying the expiresInMinutes parameter, for the default protection period of 120 minutes.", "input": { "cluster": "test-task-protection", "tasks": [ "b8b1cf532d0e46ba8d44a40d1de16772" ], - "protectionEnabled": false + "protectionEnabled": true }, "output": { 
"protectedTasks": [ { "taskArn": "arn:aws:ecs:us-west-2:012345678910:task/default/b8b1cf532d0e46ba8d44a40d1de16772", - "protectionEnabled": false + "protectionEnabled": true, + "expirationDate": "2022-11-02T06:56:32.553Z" } ], "failures": [] diff --git a/models/eks.json b/models/eks.json index ab611be9b6..c699f3042d 100644 --- a/models/eks.json +++ b/models/eks.json @@ -115,6 +115,18 @@ "traits": { "smithy.api#enumValue": "AL2023_ARM_64_STANDARD" } + }, + "AL2023_x86_64_NEURON": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AL2023_x86_64_NEURON" + } + }, + "AL2023_x86_64_NVIDIA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AL2023_x86_64_NVIDIA" + } } } }, @@ -2553,6 +2565,12 @@ "traits": { "smithy.api#documentation": "

The access configuration for the cluster.

" } + }, + "upgradePolicy": { + "target": "com.amazonaws.eks#UpgradePolicyResponse", + "traits": { + "smithy.api#documentation": "

This value indicates if extended support is enabled or disabled for the cluster.

\n

\n Learn more about EKS Extended Support in the EKS User Guide.\n

" + } } }, "traits": { @@ -3331,6 +3349,12 @@ "traits": { "smithy.api#documentation": "

If you set this value to False when creating a cluster, the default networking add-ons will not be installed.

\n

The default networking addons include vpc-cni, coredns, and kube-proxy.

\n

Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.

" } + }, + "upgradePolicy": { + "target": "com.amazonaws.eks#UpgradePolicyRequest", + "traits": { + "smithy.api#documentation": "

New clusters, by default, have extended support enabled. You can disable extended support when creating a cluster by setting this value to STANDARD.

" + } } }, "traits": { @@ -9428,6 +9452,23 @@ "target": "com.amazonaws.eks#String" } }, + "com.amazonaws.eks#SupportType": { + "type": "enum", + "members": { + "STANDARD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STANDARD" + } + }, + "EXTENDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXTENDED" + } + } + } + }, "com.amazonaws.eks#TagKey": { "type": "string", "traits": { @@ -9997,6 +10038,12 @@ "traits": { "smithy.api#documentation": "

The access configuration for the cluster.

" } + }, + "upgradePolicy": { + "target": "com.amazonaws.eks#UpgradePolicyRequest", + "traits": { + "smithy.api#documentation": "

You can enable or disable extended support for clusters currently on standard support. You cannot disable extended support once it starts. You must enable extended support before your cluster exits standard support.

" + } } }, "traits": { @@ -10591,6 +10638,12 @@ "traits": { "smithy.api#enumValue": "PodIdentityAssociations" } + }, + "UPGRADE_POLICY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UpgradePolicy" + } } } }, @@ -10793,9 +10846,43 @@ "traits": { "smithy.api#enumValue": "AccessConfigUpdate" } + }, + "UPGRADE_POLICY_UPDATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UpgradePolicyUpdate" + } } } }, + "com.amazonaws.eks#UpgradePolicyRequest": { + "type": "structure", + "members": { + "supportType": { + "target": "com.amazonaws.eks#SupportType", + "traits": { + "smithy.api#documentation": "

If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support.

\n

\n Learn more about EKS Extended Support in the EKS User Guide.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The support policy to use for the cluster. Extended support allows you to remain on specific Kubernetes versions for longer. Clusters in extended support have higher costs. The default value is EXTENDED. Use STANDARD to disable extended support.

\n

\n Learn more about EKS Extended Support in the EKS User Guide.\n

" + } + }, + "com.amazonaws.eks#UpgradePolicyResponse": { + "type": "structure", + "members": { + "supportType": { + "target": "com.amazonaws.eks#SupportType", + "traits": { + "smithy.api#documentation": "

If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support.

\n

\n Learn more about EKS Extended Support in the EKS User Guide.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This value indicates if extended support is enabled or disabled for the cluster.

\n

\n Learn more about EKS Extended Support in the EKS User Guide.\n

" + } + }, "com.amazonaws.eks#VpcConfigRequest": { "type": "structure", "members": { diff --git a/models/elastic-load-balancing-v2.json b/models/elastic-load-balancing-v2.json index acc08e24c4..c8059f8ca2 100644 --- a/models/elastic-load-balancing-v2.json +++ b/models/elastic-load-balancing-v2.json @@ -1775,6 +1775,23 @@ "com.amazonaws.elasticloadbalancingv2#Default": { "type": "boolean" }, + "com.amazonaws.elasticloadbalancingv2#DeleteAssociationSameAccountException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.elasticloadbalancingv2#ErrorDescription" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "DeleteAssociationSameAccount", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

The specified association cannot be within the same account.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.elasticloadbalancingv2#DeleteListener": { "type": "operation", "input": { @@ -1934,6 +1951,70 @@ "smithy.api#output": {} } }, + "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociation": { + "type": "operation", + "input": { + "target": "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociationInput" + }, + "output": { + "target": "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.elasticloadbalancingv2#DeleteAssociationSameAccountException" + }, + { + "target": "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociationNotFoundException" + }, + { + "target": "com.amazonaws.elasticloadbalancingv2#TrustStoreNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a shared trust store association.

", + "smithy.api#examples": [ + { + "title": "Delete a shared trust store association", + "documentation": "This example deletes the association between the specified trust store and the specified load balancer.", + "input": { + "TrustStoreArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:truststore/my-trust-store/73e2d6bc24d8a063", + "ResourceArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-load-balancer/80233fa81d678c2c" + } + } + ] + } + }, + "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociationInput": { + "type": "structure", + "members": { + "TrustStoreArn": { + "target": "com.amazonaws.elasticloadbalancingv2#TrustStoreArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the trust store.

", + "smithy.api#required": {} + } + }, + "ResourceArn": { + "target": "com.amazonaws.elasticloadbalancingv2#ResourceArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociationOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.elasticloadbalancingv2#DeleteTargetGroup": { "type": "operation", "input": { @@ -3295,7 +3376,7 @@ "Include": { "target": "com.amazonaws.elasticloadbalancingv2#ListOfDescribeTargetHealthIncludeOptions", "traits": { - "smithy.api#documentation": "

Used to inclue anomaly detection information.

" + "smithy.api#documentation": "

Used to include anomaly detection information.

" } } }, @@ -3459,7 +3540,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the revocation files in use by the specified \n trust store arn, or revocation ID.

", + "smithy.api#documentation": "

Describes the revocation files in use by the specified trust store or revocation\n files.

", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "NextMarker", @@ -3535,7 +3616,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes all trust stores for a given account \n by trust store arn’s or name.

", + "smithy.api#documentation": "

Describes all trust stores for the specified account.

", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "NextMarker", @@ -3720,6 +3801,9 @@ { "target": "com.amazonaws.elasticloadbalancingv2#DeleteRule" }, + { + "target": "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociation" + }, { "target": "com.amazonaws.elasticloadbalancingv2#DeleteTargetGroup" }, @@ -3771,6 +3855,9 @@ { "target": "com.amazonaws.elasticloadbalancingv2#DescribeTrustStores" }, + { + "target": "com.amazonaws.elasticloadbalancingv2#GetResourcePolicy" + }, { "target": "com.amazonaws.elasticloadbalancingv2#GetTrustStoreCaCertificatesBundle" }, @@ -4952,6 +5039,62 @@ "smithy.api#documentation": "

Information about a forward action.

" } }, + "com.amazonaws.elasticloadbalancingv2#GetResourcePolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.elasticloadbalancingv2#GetResourcePolicyInput" + }, + "output": { + "target": "com.amazonaws.elasticloadbalancingv2#GetResourcePolicyOutput" + }, + "errors": [ + { + "target": "com.amazonaws.elasticloadbalancingv2#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the resource policy for a specified resource.

", + "smithy.api#examples": [ + { + "title": "Retrieve a resource policy", + "documentation": "This example retrieves the resource policy for the specified trust store.", + "input": { + "ResourceArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:truststore/my-trust-store/73e2d6bc24d8a067" + } + } + ] + } + }, + "com.amazonaws.elasticloadbalancingv2#GetResourcePolicyInput": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.elasticloadbalancingv2#ResourceArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.elasticloadbalancingv2#GetResourcePolicyOutput": { + "type": "structure", + "members": { + "Policy": { + "target": "com.amazonaws.elasticloadbalancingv2#Policy", + "traits": { + "smithy.api#documentation": "

The content of the resource policy.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.elasticloadbalancingv2#GetTrustStoreCaCertificatesBundle": { "type": "operation", "input": { @@ -6611,7 +6754,7 @@ } ], "traits": { - "smithy.api#documentation": "

Update the ca certificate bundle for a given trust store.

" + "smithy.api#documentation": "

Update the ca certificate bundle for the specified trust store.

" } }, "com.amazonaws.elasticloadbalancingv2#ModifyTrustStoreInput": { @@ -6686,6 +6829,12 @@ "traits": { "smithy.api#documentation": "

Indicates whether expired client certificates are ignored.

" } + }, + "TrustStoreAssociationStatus": { + "target": "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociationStatusEnum", + "traits": { + "smithy.api#documentation": "

Indicates a shared trust stores association status.

" + } } }, "traits": { @@ -6753,6 +6902,14 @@ "smithy.api#documentation": "

Information about a path pattern condition.

" } }, + "com.amazonaws.elasticloadbalancingv2#Policy": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, "com.amazonaws.elasticloadbalancingv2#Port": { "type": "integer", "traits": { @@ -7268,6 +7425,23 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.elasticloadbalancingv2#ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.elasticloadbalancingv2#ErrorDescription" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ResourceNotFound", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

The specified resource does not exist.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.elasticloadbalancingv2#RevocationContent": { "type": "structure", "members": { @@ -7600,7 +7774,7 @@ "target": "com.amazonaws.elasticloadbalancingv2#IpAddressType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Note: Internal load balancers must use the ipv4 IP address type.

\n

[Application Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses), dualstack (for IPv4 and \n IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public \n addresses, with private IPv4 and IPv6 addresses).

\n

[Network Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses) and dualstack \n (for IPv4 and IPv6 addresses). You can’t specify dualstack \n for a load balancer with a UDP or TCP_UDP listener.

\n

[Gateway Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses) and dualstack \n (for IPv4 and IPv6 addresses).

", + "smithy.api#documentation": "

Note: Internal load balancers must use the ipv4 IP address type.

\n

[Application Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses), dualstack (for IPv4 and \n IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public \n addresses, with private IPv4 and IPv6 addresses).

\n

Note: Application Load Balancer authentication only supports IPv4 addresses when \n connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public \n IPv4 address the load balancer cannot complete the authentication process, resulting \n in HTTP 500 errors.

\n

[Network Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses) and dualstack \n (for IPv4 and IPv6 addresses). You can’t specify dualstack \n for a load balancer with a UDP or TCP_UDP listener.

\n

[Gateway Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses) and dualstack \n (for IPv4 and IPv6 addresses).

", "smithy.api#required": {} } } @@ -8949,9 +9123,43 @@ "smithy.api#documentation": "

Information about the resources a trust store is associated with.

" } }, + "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociationNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.elasticloadbalancingv2#ErrorDescription" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "AssociationNotFound", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

The specified association does not exist.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociationResourceArn": { "type": "string" }, + "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociationStatusEnum": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "active" + } + }, + "REMOVED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "removed" + } + } + } + }, "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociations": { "type": "list", "member": { diff --git a/models/elastic-load-balancing.json b/models/elastic-load-balancing.json index a246f7688e..6f2556ab02 100644 --- a/models/elastic-load-balancing.json +++ b/models/elastic-load-balancing.json @@ -937,6 +937,111 @@ "output": { "DNSName": "my-load-balancer-1234567890.us-west-2.elb.amazonaws.com" } + }, + { + "title": "To create an HTTP load balancer in EC2-Classic", + "documentation": "This example creates a load balancer with an HTTP listener in EC2-Classic.", + "input": { + "LoadBalancerName": "my-load-balancer", + "Listeners": [ + { + "Protocol": "HTTP", + "LoadBalancerPort": 80, + "InstanceProtocol": "HTTP", + "InstancePort": 80 + } + ], + "AvailabilityZones": [ + "us-west-2a" + ] + }, + "output": { + "DNSName": "my-load-balancer-123456789.us-west-2.elb.amazonaws.com" + } + }, + { + "title": "To create an HTTPS load balancer in a VPC", + "documentation": "This example creates a load balancer with an HTTPS listener in a VPC.", + "input": { + "LoadBalancerName": "my-load-balancer", + "Listeners": [ + { + "Protocol": "HTTP", + "LoadBalancerPort": 80, + "InstanceProtocol": "HTTP", + "InstancePort": 80 + }, + { + "Protocol": "HTTPS", + "LoadBalancerPort": 443, + "InstanceProtocol": "HTTP", + "InstancePort": 80, + "SSLCertificateId": "arn:aws:iam::123456789012:server-certificate/my-server-cert" + } + ], + "Subnets": [ + "subnet-15aaab61" + ], + "SecurityGroups": [ + "sg-a61988c3" + 
] + }, + "output": { + "DNSName": "my-load-balancer-1234567890.us-west-2.elb.amazonaws.com" + } + }, + { + "title": "To create an HTTPS load balancer in EC2-Classic", + "documentation": "This example creates a load balancer with an HTTPS listener in EC2-Classic.", + "input": { + "LoadBalancerName": "my-load-balancer", + "Listeners": [ + { + "Protocol": "HTTP", + "LoadBalancerPort": 80, + "InstanceProtocol": "HTTP", + "InstancePort": 80 + }, + { + "Protocol": "HTTPS", + "LoadBalancerPort": 443, + "InstanceProtocol": "HTTP", + "InstancePort": 80, + "SSLCertificateId": "arn:aws:iam::123456789012:server-certificate/my-server-cert" + } + ], + "AvailabilityZones": [ + "us-west-2a" + ] + }, + "output": { + "DNSName": "my-load-balancer-123456789.us-west-2.elb.amazonaws.com" + } + }, + { + "title": "To create an internal load balancer", + "documentation": "This example creates an internal load balancer with an HTTP listener in a VPC.", + "input": { + "LoadBalancerName": "my-load-balancer", + "Listeners": [ + { + "Protocol": "HTTP", + "LoadBalancerPort": 80, + "InstanceProtocol": "HTTP", + "InstancePort": 80 + } + ], + "Subnets": [ + "subnet-15aaab61" + ], + "SecurityGroups": [ + "sg-a61988c3" + ], + "Scheme": "internal" + }, + "output": { + "DNSName": "internal-my-load-balancer-123456789.us-west-2.elb.amazonaws.com" + } } ] } @@ -1014,6 +1119,22 @@ } ] } + }, + { + "title": "To create an HTTPS listener for a load balancer", + "documentation": "This example creates a listener for your load balancer at port 443 using the HTTPS protocol.", + "input": { + "LoadBalancerName": "my-load-balancer", + "Listeners": [ + { + "Protocol": "HTTPS", + "LoadBalancerPort": 443, + "InstanceProtocol": "HTTP", + "InstancePort": 80, + "SSLCertificateId": "arn:aws:iam::123456789012:server-certificate/my-server-cert" + } + ] + } } ] } @@ -1060,6 +1181,36 @@ } ] } + }, + { + "title": "To create a public key policy", + "documentation": "This example creates a public key policy.", + "input": { + 
"LoadBalancerName": "my-load-balancer", + "PolicyName": "my-PublicKey-policy", + "PolicyTypeName": "PublicKeyPolicyType", + "PolicyAttributes": [ + { + "AttributeName": "PublicKey", + "AttributeValue": "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwAYUjnfyEyXr1pxjhFWBpMlggUcqoi3kl+dS74kj//c6x7ROtusUaeQCTgIUkayttRDWchuqo1pHC1u+n5xxXnBBe2ejbb2WRsKIQ5rXEeixsjFpFsojpSQKkzhVGI6mJVZBJDVKSHmswnwLBdofLhzvllpovBPTHe+o4haAWvDBALJU0pkSI1FecPHcs2hwxf14zHoXy1e2k36A64nXW43wtfx5qcVSIxtCEOjnYRg7RPvybaGfQ+v6Iaxb/+7J5kEvZhTFQId+bSiJImF1FSUT1W1xwzBZPUbcUkkXDj45vC2s3Z8E+Lk7a3uZhvsQHLZnrfuWjBWGWvZ/MhZYgEXAMPLE" + } + ] + } + }, + { + "title": "To create a backend server authentication policy", + "documentation": "This example creates a backend server authentication policy that enables authentication on your backend instance using a public key policy.", + "input": { + "LoadBalancerName": "my-load-balancer", + "PolicyName": "my-authentication-policy", + "PolicyTypeName": "BackendServerAuthenticationPolicyType", + "PolicyAttributes": [ + { + "AttributeName": "PublicKeyPolicyName", + "AttributeValue": "my-PublicKey-policy" + } + ] + } } ] } @@ -2001,7 +2152,35 @@ "inputToken": "Marker", "outputToken": "NextMarker", "items": "LoadBalancerDescriptions" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeLoadBalancersSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + }, + { + "id": "DescribeLoadBalancersFailure", + "params": { + "LoadBalancerNames": [ + "fake_load_balancer" + ] + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.elasticloadbalancing#DescribeTags": { @@ -4046,6 +4225,28 @@ } } } + }, + { + "title": "To enable connection draining", + "documentation": "This example enables connection draining for the specified load balancer.", + "input": { + 
"LoadBalancerName": "my-load-balancer", + "LoadBalancerAttributes": { + "ConnectionDraining": { + "Enabled": true, + "Timeout": 300 + } + } + }, + "output": { + "LoadBalancerName": "my-load-balancer", + "LoadBalancerAttributes": { + "ConnectionDraining": { + "Enabled": true, + "Timeout": 300 + } + } + } } ] } diff --git a/models/elasticache.json b/models/elasticache.json index bcc195c9b4..c6cea8493a 100644 --- a/models/elasticache.json +++ b/models/elasticache.json @@ -203,13 +203,13 @@ "ScaleUpModifications": { "target": "com.amazonaws.elasticache#NodeTypeList", "traits": { - "smithy.api#documentation": "

A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group.

\n

When scaling up a Redis cluster or replication group using\n ModifyCacheCluster or ModifyReplicationGroup, use a value\n from this list for the CacheNodeType parameter.

" + "smithy.api#documentation": "

A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group.

\n

When scaling up a Redis OSS cluster or replication group using\n ModifyCacheCluster or ModifyReplicationGroup, use a value\n from this list for the CacheNodeType parameter.

" } }, "ScaleDownModifications": { "target": "com.amazonaws.elasticache#NodeTypeList", "traits": { - "smithy.api#documentation": "

A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group. When scaling down a Redis cluster or\n replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from\n this list for the CacheNodeType parameter.

" + "smithy.api#documentation": "

A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group. When scaling down a Redis OSS cluster or\n replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from\n this list for the CacheNodeType parameter.

" } } }, @@ -1886,7 +1886,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The name of the compute and memory capacity node type for the cluster.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis configuration variables appendonly and\n appendfsync are not supported on Redis version 2.8.22 and\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The name of the compute and memory capacity node type for the cluster.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis OSS Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis OSS configuration variables appendonly and\n appendfsync are not supported on Redis OSS version 2.8.22 and\n later.

    \n
  • \n
" } }, "Engine": { @@ -1910,7 +1910,7 @@ "NumCacheNodes": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of cache nodes in the cluster.

\n

For clusters running Redis, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

" + "smithy.api#documentation": "

The number of cache nodes in the cluster.

\n

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

" } }, "PreferredAvailabilityZone": { @@ -1973,7 +1973,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#Boolean", "traits": { - "smithy.api#documentation": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" + "smithy.api#documentation": "

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" } }, "SecurityGroups": { @@ -2003,7 +2003,7 @@ "AuthTokenEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Redis\n commands.

\n

Default: false\n

" + "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Redis OSS \n commands.

\n

Default: false\n

" } }, "AuthTokenLastModifiedDate": { @@ -2015,13 +2015,13 @@ "TransitEncryptionEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables in-transit encryption when set to true.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6, 4.x or\n later.

\n

Default: false\n

" + "smithy.api#documentation": "

A flag that enables in-transit encryption when set to true.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or\n later.

\n

Default: false\n

" } }, "AtRestEncryptionEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables encryption at-rest when set to true.

\n

You cannot modify the value of AtRestEncryptionEnabled after the cluster\n is created. To enable at-rest encryption on a cluster you must set\n AtRestEncryptionEnabled to true when you create a\n cluster.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6, 4.x or\n later.

\n

Default: false\n

" + "smithy.api#documentation": "

A flag that enables encryption at-rest when set to true.

\n

You cannot modify the value of AtRestEncryptionEnabled after the cluster\n is created. To enable at-rest encryption on a cluster you must set\n AtRestEncryptionEnabled to true when you create a\n cluster.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or\n later.

\n

Default: false\n

" } }, "ARN": { @@ -2045,13 +2045,13 @@ "NetworkType": { "target": "com.amazonaws.elasticache#NetworkType", "traits": { - "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" } }, "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type associated with the cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

The network type associated with the cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } }, "TransitEncryptionMode": { @@ -2262,7 +2262,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents an individual cache node within a cluster. Each cache node runs its own\n instance of the cluster's protocol-compliant caching software - either Memcached or\n Redis.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis configuration variables appendonly and\n appendfsync are not supported on Redis version 2.8.22 and\n later.

    \n
  • \n
" + "smithy.api#documentation": "

Represents an individual cache node within a cluster. Each cache node runs its own\n instance of the cluster's protocol-compliant caching software - either Memcached or\n Redis OSS.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis OSS Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis OSS configuration variables appendonly and\n appendfsync are not supported on Redis OSS version 2.8.22 and\n later.

    \n
  • \n
" } }, "com.amazonaws.elasticache#CacheNodeIdsList": { @@ -2342,7 +2342,7 @@ } }, "traits": { - "smithy.api#documentation": "

A parameter that has a different value for each cache node type it is applied to. For\n example, in a Redis cluster, a cache.m1.large cache node type would have a\n larger maxmemory value than a cache.m1.small type.

" + "smithy.api#documentation": "

A parameter that has a different value for each cache node type it is applied to. For\n example, in a Redis OSS cluster, a cache.m1.large cache node type would have a\n larger maxmemory value than a cache.m1.small type.

" } }, "com.amazonaws.elasticache#CacheNodeTypeSpecificParametersList": { @@ -2826,7 +2826,7 @@ "SupportedNetworkTypes": { "target": "com.amazonaws.elasticache#NetworkTypeList", "traits": { - "smithy.api#documentation": "

Either ipv4 | ipv6 | dual_stack. IPv6 is\n supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

Either ipv4 | ipv6 | dual_stack. IPv6 is\n supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" } } }, @@ -3109,7 +3109,7 @@ "target": "com.amazonaws.elasticache#AllowedNodeGroupId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The 4-digit id for the node group you are configuring. For Redis (cluster mode\n disabled) replication groups, the node group id is always 0001. To find a Redis (cluster\n mode enabled)'s node group's (shard's) id, see Finding a Shard's\n Id.

", + "smithy.api#documentation": "

The 4-digit id for the node group you are configuring. For Redis OSS (cluster mode\n disabled) replication groups, the node group id is always 0001. To find a Redis OSS (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's\n Id.

", "smithy.api#required": {} } }, @@ -3117,14 +3117,14 @@ "target": "com.amazonaws.elasticache#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The number of replicas you want in this node group at the end of this operation.\n The maximum value for NewReplicaCount is 5. The minimum value depends upon\n the type of Redis replication group you are working with.

\n

The minimum number of replicas in a shard or replication group is:

\n
    \n
  • \n

    Redis (cluster mode disabled)

    \n
      \n
    • \n

      If Multi-AZ: 1

      \n
    • \n
    • \n

      If Multi-AZ: 0

      \n
    • \n
    \n
  • \n
  • \n

    Redis (cluster mode enabled): 0 (though you will not be able to failover to\n a replica if your primary node fails)

    \n
  • \n
", + "smithy.api#documentation": "

The number of replicas you want in this node group at the end of this operation.\n The maximum value for NewReplicaCount is 5. The minimum value depends upon\n the type of Redis OSS replication group you are working with.

\n

The minimum number of replicas in a shard or replication group is:

\n
    \n
  • \n

    Redis OSS (cluster mode disabled)

    \n
      \n
    • \n

      If Multi-AZ: 1

      \n
    • \n
    • \n

      If Multi-AZ: 0

      \n
    • \n
    \n
  • \n
  • \n

    Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to\n a replica if your primary node fails)

    \n
  • \n
", "smithy.api#required": {} } }, "PreferredAvailabilityZones": { "target": "com.amazonaws.elasticache#PreferredAvailabilityZoneList", "traits": { - "smithy.api#documentation": "

A list of PreferredAvailabilityZone strings that specify which\n availability zones the replication group's nodes are to be in. The nummber of\n PreferredAvailabilityZone values must equal the value of\n NewReplicaCount plus 1 to account for the primary node. If this member\n of ReplicaConfiguration is omitted, ElastiCache for Redis selects the\n availability zone for each of the replicas.

" + "smithy.api#documentation": "

A list of PreferredAvailabilityZone strings that specify which\n availability zones the replication group's nodes are to be in. The nummber of\n PreferredAvailabilityZone values must equal the value of\n NewReplicaCount plus 1 to account for the primary node. If this member\n of ReplicaConfiguration is omitted, ElastiCache (Redis OSS) selects the\n availability zone for each of the replicas.

" } }, "PreferredOutpostArns": { @@ -3173,7 +3173,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a copy of an existing serverless cache’s snapshot. Available for Redis only.

" + "smithy.api#documentation": "

Creates a copy of an existing serverless cache’s snapshot. Available for Redis OSS and Serverless Memcached only.

" } }, "com.amazonaws.elasticache#CopyServerlessCacheSnapshotRequest": { @@ -3183,7 +3183,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The identifier of the existing serverless cache’s snapshot to be copied. Available for Redis only.

", + "smithy.api#documentation": "

The identifier of the existing serverless cache’s snapshot to be copied. Available for Redis OSS and Serverless Memcached only.

", "smithy.api#required": {} } }, @@ -3191,20 +3191,20 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The identifier for the snapshot to be created. Available for Redis only.

", + "smithy.api#documentation": "

The identifier for the snapshot to be created. Available for Redis OSS and Serverless Memcached only.

", "smithy.api#required": {} } }, "KmsKeyId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of the KMS key used to encrypt the target snapshot. Available for Redis only.

" + "smithy.api#documentation": "

The identifier of the KMS key used to encrypt the target snapshot. Available for Redis OSS and Serverless Memcached only.

" } }, "Tags": { "target": "com.amazonaws.elasticache#TagList", "traits": { - "smithy.api#documentation": "

A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis only. Default: NULL

" + "smithy.api#documentation": "

A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only. Default: NULL

" } } }, @@ -3218,7 +3218,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "

The response for the attempt to copy the serverless cache snapshot. Available for Redis only.

" + "smithy.api#documentation": "

The response for the attempt to copy the serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -3258,7 +3258,7 @@ } ], "traits": { - "smithy.api#documentation": "

Makes a copy of an existing snapshot.

\n \n

This operation is valid for Redis only.

\n
\n \n

Users or groups that have permissions to use the CopySnapshot\n operation can create their own Amazon S3 buckets and copy snapshots to it. To\n control access to your snapshots, use an IAM policy to control who has the ability\n to use the CopySnapshot operation. For more information about using IAM\n to control the use of ElastiCache operations, see Exporting\n Snapshots and Authentication & Access\n Control.

\n
\n

You could receive the following error messages.

\n

\n Error Messages\n

\n
    \n
  • \n

    \n Error Message: The S3 bucket %s is outside of\n the region.

    \n

    \n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.

    \n
  • \n
  • \n

    \n Error Message: The S3 bucket %s does not\n exist.

    \n

    \n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.

    \n
  • \n
  • \n

    \n Error Message: The S3 bucket %s is not owned\n by the authenticated user.

    \n

    \n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.

    \n
  • \n
  • \n

    \n Error Message: The authenticated user does\n not have sufficient permissions to perform the desired activity.

    \n

    \n Solution: Contact your system administrator\n to get the needed permissions.

    \n
  • \n
  • \n

    \n Error Message: The S3 bucket %s already\n contains an object with key %s.

    \n

    \n Solution: Give the\n TargetSnapshotName a new and unique value. If exporting a\n snapshot, you could alternatively create a new Amazon S3 bucket and use this\n same value for TargetSnapshotName.

    \n
  • \n
  • \n

    \n Error Message: ElastiCache has not been\n granted READ permissions %s on the S3 Bucket.

    \n

    \n Solution: Add List and Read permissions on\n the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.

    \n
  • \n
  • \n

    \n Error Message: ElastiCache has not been\n granted WRITE permissions %s on the S3 Bucket.

    \n

    \n Solution: Add Upload/Delete permissions on\n the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.

    \n
  • \n
  • \n

    \n Error Message: ElastiCache has not been\n granted READ_ACP permissions %s on the S3 Bucket.

    \n

    \n Solution: Add View Permissions on the bucket.\n For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.

    \n
  • \n
", + "smithy.api#documentation": "

Makes a copy of an existing snapshot.

\n \n

This operation is valid for Redis OSS only.

\n
\n \n

Users or groups that have permissions to use the CopySnapshot\n operation can create their own Amazon S3 buckets and copy snapshots to it. To\n control access to your snapshots, use an IAM policy to control who has the ability\n to use the CopySnapshot operation. For more information about using IAM\n to control the use of ElastiCache operations, see Exporting\n Snapshots and Authentication & Access\n Control.

\n
\n

You could receive the following error messages.

\n

\n Error Messages\n

\n
    \n
  • \n

    \n Error Message: The S3 bucket %s is outside of\n the region.

    \n

    \n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.

    \n
  • \n
  • \n

    \n Error Message: The S3 bucket %s does not\n exist.

    \n

    \n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.

    \n
  • \n
  • \n

    \n Error Message: The S3 bucket %s is not owned\n by the authenticated user.

    \n

    \n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.

    \n
  • \n
  • \n

    \n Error Message: The authenticated user does\n not have sufficient permissions to perform the desired activity.

    \n

    \n Solution: Contact your system administrator\n to get the needed permissions.

    \n
  • \n
  • \n

    \n Error Message: The S3 bucket %s already\n contains an object with key %s.

    \n

    \n Solution: Give the\n TargetSnapshotName a new and unique value. If exporting a\n snapshot, you could alternatively create a new Amazon S3 bucket and use this\n same value for TargetSnapshotName.

    \n
  • \n
  • \n

    \n Error Message: ElastiCache has not been\n granted READ permissions %s on the S3 Bucket.

    \n

    \n Solution: Add List and Read permissions on\n the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.

    \n
  • \n
  • \n

    \n Error Message: ElastiCache has not been\n granted WRITE permissions %s on the S3 Bucket.

    \n

    \n Solution: Add Upload/Delete permissions on\n the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.

    \n
  • \n
  • \n

    \n Error Message: ElastiCache has not been\n granted READ_ACP permissions %s on the S3 Bucket.

    \n

    \n Solution: Add View Permissions on the bucket.\n For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.

    \n
  • \n
", "smithy.api#examples": [ { "title": "CopySnapshot", @@ -3409,7 +3409,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a cluster. All nodes in the cluster run the same protocol-compliant cache\n engine software, either Memcached or Redis.

\n

This operation is not supported for Redis (cluster mode enabled) clusters.

", + "smithy.api#documentation": "

Creates a cluster. All nodes in the cluster run the same protocol-compliant cache\n engine software, either Memcached or Redis OSS.

\n

This operation is not supported for Redis OSS (cluster mode enabled) clusters.

", "smithy.api#examples": [ { "title": "CreateCacheCluster", @@ -3528,13 +3528,13 @@ "NumCacheNodes": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The initial number of cache nodes that the cluster has.

\n

For clusters running Redis, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

\n

If you need more than 40 nodes for your Memcached cluster, please fill out the\n ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/.

" + "smithy.api#documentation": "

The initial number of cache nodes that the cluster has.

\n

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

\n

If you need more than 40 nodes for your Memcached cluster, please fill out the\n ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/.

" } }, "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The compute and memory capacity of the nodes in the node group (shard).

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis configuration variables appendonly and\n appendfsync are not supported on Redis version 2.8.22 and\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The compute and memory capacity of the nodes in the node group (shard).

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis OSS Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis OSS configuration variables appendonly and\n appendfsync are not supported on Redis OSS version 2.8.22 and\n later.

    \n
  • \n
" } }, "Engine": { @@ -3582,13 +3582,13 @@ "SnapshotArns": { "target": "com.amazonaws.elasticache#SnapshotArnsList", "traits": { - "smithy.api#documentation": "

A single-element string list containing an Amazon Resource Name (ARN) that uniquely\n identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to\n populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any\n commas.

\n \n

This parameter is only valid if the Engine parameter is\n redis.

\n
\n

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb\n

" + "smithy.api#documentation": "

A single-element string list containing an Amazon Resource Name (ARN) that uniquely\n identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to\n populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any\n commas.

\n \n

This parameter is only valid if the Engine parameter is\n redis.

\n
\n

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb\n

" } }, "SnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The name of a Redis snapshot from which to restore data into the new node group\n (shard). The snapshot status changes to restoring while the new node group\n (shard) is being created.

\n \n

This parameter is only valid if the Engine parameter is\n redis.

\n
" + "smithy.api#documentation": "

The name of a Redis OSS snapshot from which to restore data into the new node group\n (shard). The snapshot status changes to restoring while the new node group\n (shard) is being created.

\n \n

This parameter is only valid if the Engine parameter is\n redis.

\n
" } }, "PreferredMaintenanceWindow": { @@ -3612,7 +3612,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" + "smithy.api#documentation": "

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" } }, "SnapshotRetentionLimit": { @@ -3666,13 +3666,13 @@ "NetworkType": { "target": "com.amazonaws.elasticache#NetworkType", "traits": { - "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" } }, "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } } }, @@ -4023,7 +4023,7 @@ } ], "traits": { - "smithy.api#documentation": "

Global Datastore for Redis offers fully managed, fast, reliable and secure\n cross-region replication. Using Global Datastore for Redis, you can create cross-region\n read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster\n recovery across regions. For more information, see Replication\n Across Regions Using Global Datastore.

\n
    \n
  • \n

    The GlobalReplicationGroupIdSuffix is the\n name of the Global datastore.

    \n
  • \n
  • \n

    The PrimaryReplicationGroupId represents the\n name of the primary cluster that accepts writes and will replicate updates to\n the secondary cluster.

    \n
  • \n
" + "smithy.api#documentation": "

Global Datastore for Redis OSS offers fully managed, fast, reliable and secure\n cross-region replication. Using Global Datastore for Redis OSS, you can create cross-region\n read replica clusters for ElastiCache (Redis OSS) to enable low-latency reads and disaster\n recovery across regions. For more information, see Replication\n Across Regions Using Global Datastore.

\n
    \n
  • \n

    The GlobalReplicationGroupIdSuffix is the\n name of the Global datastore.

    \n
  • \n
  • \n

    The PrimaryReplicationGroupId represents the\n name of the primary cluster that accepts writes and will replicate updates to\n the secondary cluster.

    \n
  • \n
" } }, "com.amazonaws.elasticache#CreateGlobalReplicationGroupMessage": { @@ -4135,7 +4135,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication\n group.

\n

This API can be used to create a standalone regional replication group or a secondary\n replication group associated with a Global datastore.

\n

A Redis (cluster mode disabled) replication group is a collection of nodes, where\n one of the nodes is a read/write primary and the others are read-only replicas.\n Writes to the primary are asynchronously propagated to the replicas.

\n

A Redis cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI:\n node groups). Each shard has a primary node and up to 5 read-only replica nodes. The\n configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which\n is the maximum number or replicas allowed.

\n

The node or shard limit can be increased to a maximum of 500 per cluster if the Redis\n engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node\n cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500\n shards (single primary and no replicas). Make sure there are enough available IP\n addresses to accommodate the increase. Common pitfalls include the subnets in the subnet\n group have too small a CIDR range or the subnets are shared and heavily used by other\n clusters. For more information, see Creating a Subnet\n Group. For versions below 5.0.6, the limit is 250 per cluster.

\n

To request a limit increase, see Amazon Service Limits and\n choose the limit type Nodes per cluster per instance\n type.

\n

When a Redis (cluster mode disabled) replication group has been successfully created,\n you can add one or more read replicas to it, up to a total of 5 read replicas. If you\n need to increase or decrease the number of node groups (console: shards), you can avail\n yourself of ElastiCache for Redis' scaling. For more information, see Scaling\n ElastiCache for Redis Clusters in the ElastiCache User\n Guide.

\n \n

This operation is valid for Redis only.

\n
", + "smithy.api#documentation": "

Creates a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication\n group.

\n

This API can be used to create a standalone regional replication group or a secondary\n replication group associated with a Global datastore.

\n

A Redis OSS (cluster mode disabled) replication group is a collection of nodes, where\n one of the nodes is a read/write primary and the others are read-only replicas.\n Writes to the primary are asynchronously propagated to the replicas.

\n

A Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI:\n node groups). Each shard has a primary node and up to 5 read-only replica nodes. The\n configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which\n is the maximum number or replicas allowed.

\n

The node or shard limit can be increased to a maximum of 500 per cluster if the Redis OSS \n engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node\n cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500\n shards (single primary and no replicas). Make sure there are enough available IP\n addresses to accommodate the increase. Common pitfalls include the subnets in the subnet\n group have too small a CIDR range or the subnets are shared and heavily used by other\n clusters. For more information, see Creating a Subnet\n Group. For versions below 5.0.6, the limit is 250 per cluster.

\n

To request a limit increase, see Amazon Service Limits and\n choose the limit type Nodes per cluster per instance\n type.

\n

When a Redis OSS (cluster mode disabled) replication group has been successfully created,\n you can add one or more read replicas to it, up to a total of 5 read replicas. If you\n need to increase or decrease the number of node groups (console: shards), you can use ElastiCache (Redis OSS) scaling. \n For more information, see Scaling\n ElastiCache (Redis OSS) Clusters in the ElastiCache User\n Guide.

\n \n

This operation is valid for Redis OSS only.

\n
", "smithy.api#examples": [ { "title": "CreateCacheReplicationGroup", @@ -4255,7 +4255,7 @@ "AutomaticFailoverEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether a read-only replica is automatically promoted to read/write primary\n if the existing primary fails.

\n

\n AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled)\n replication groups.

\n

Default: false

" + "smithy.api#documentation": "

Specifies whether a read-only replica is automatically promoted to read/write primary\n if the existing primary fails.

\n

\n AutomaticFailoverEnabled must be enabled for Redis OSS (cluster mode enabled)\n replication groups.

\n

Default: false

" } }, "MultiAZEnabled": { @@ -4279,7 +4279,7 @@ "NumNodeGroups": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

An optional parameter that specifies the number of node groups (shards) for this Redis\n (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit\n this parameter or set it to 1.

\n

Default: 1

" + "smithy.api#documentation": "

An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit\n this parameter or set it to 1.

\n

Default: 1

" } }, "ReplicasPerNodeGroup": { @@ -4291,13 +4291,13 @@ "NodeGroupConfiguration": { "target": "com.amazonaws.elasticache#NodeGroupConfigurationList", "traits": { - "smithy.api#documentation": "

A list of node group (shard) configuration options. Each node group (shard)\n configuration has the following members: PrimaryAvailabilityZone,\n ReplicaAvailabilityZones, ReplicaCount, and\n Slots.

\n

If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled)\n replication group, you can use this parameter to individually configure each node group\n (shard), or you can omit this parameter. However, it is required when seeding a Redis\n (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group\n (shard) using this parameter because you must specify the slots for each node\n group.

" + "smithy.api#documentation": "

A list of node group (shard) configuration options. Each node group (shard)\n configuration has the following members: PrimaryAvailabilityZone,\n ReplicaAvailabilityZones, ReplicaCount, and\n Slots.

\n

If you're creating a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled)\n replication group, you can use this parameter to individually configure each node group\n (shard), or you can omit this parameter. However, it is required when seeding a Redis OSS (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group\n (shard) using this parameter because you must specify the slots for each node\n group.

" } }, "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The compute and memory capacity of the nodes in the node group (shard).

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis configuration variables appendonly and\n appendfsync are not supported on Redis version 2.8.22 and\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The compute and memory capacity of the nodes in the node group (shard).

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis OSS Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis OSS configuration variables appendonly and\n appendfsync are not supported on Redis OSS version 2.8.22 and\n later.

    \n
  • \n
" } }, "Engine": { @@ -4315,7 +4315,7 @@ "CacheParameterGroupName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The name of the parameter group to associate with this replication group. If this\n argument is omitted, the default cache parameter group for the specified engine is\n used.

\n

If you are running Redis version 3.2.4 or later, only one node group (shard), and want\n to use a default parameter group, we recommend that you specify the parameter group by\n name.

\n
    \n
  • \n

    To create a Redis (cluster mode disabled) replication group, use\n CacheParameterGroupName=default.redis3.2.

    \n
  • \n
  • \n

    To create a Redis (cluster mode enabled) replication group, use\n CacheParameterGroupName=default.redis3.2.cluster.on.

    \n
  • \n
" + "smithy.api#documentation": "

The name of the parameter group to associate with this replication group. If this\n argument is omitted, the default cache parameter group for the specified engine is\n used.

\n

If you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want\n to use a default parameter group, we recommend that you specify the parameter group by\n name.

\n
    \n
  • \n

    To create a Redis OSS (cluster mode disabled) replication group, use\n CacheParameterGroupName=default.redis3.2.

    \n
  • \n
  • \n

    To create a Redis OSS (cluster mode enabled) replication group, use\n CacheParameterGroupName=default.redis3.2.cluster.on.

    \n
  • \n
" } }, "CacheSubnetGroupName": { @@ -4345,7 +4345,7 @@ "SnapshotArns": { "target": "com.amazonaws.elasticache#SnapshotArnsList", "traits": { - "smithy.api#documentation": "

A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot\n files stored in Amazon S3. The snapshot files are used to populate the new replication\n group. The Amazon S3 object name in the ARN cannot contain any commas. The new\n replication group will have the number of node groups (console: shards) specified by the\n parameter NumNodeGroups or the number of node groups configured by\n NodeGroupConfiguration regardless of the number of ARNs\n specified here.

\n

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb\n

" + "smithy.api#documentation": "

A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot\n files stored in Amazon S3. The snapshot files are used to populate the new replication\n group. The Amazon S3 object name in the ARN cannot contain any commas. The new\n replication group will have the number of node groups (console: shards) specified by the\n parameter NumNodeGroups or the number of node groups configured by\n NodeGroupConfiguration regardless of the number of ARNs\n specified here.

\n

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb\n

" } }, "SnapshotName": { @@ -4375,7 +4375,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" + "smithy.api#documentation": "

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" } }, "SnapshotRetentionLimit": { @@ -4399,13 +4399,13 @@ "TransitEncryptionEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables in-transit encryption when set to true.

\n

This parameter is valid only if the Engine parameter is\n redis, the EngineVersion parameter is 3.2.6,\n 4.x or later, and the cluster is being created in an Amazon VPC.

\n

If you enable in-transit encryption, you must also specify a value for\n CacheSubnetGroup.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6, 4.x or\n later.

\n

Default: false\n

\n \n

For HIPAA compliance, you must specify TransitEncryptionEnabled as\n true, an AuthToken, and a\n CacheSubnetGroup.

\n
" + "smithy.api#documentation": "

A flag that enables in-transit encryption when set to true.

\n

This parameter is valid only if the Engine parameter is\n redis, the EngineVersion parameter is 3.2.6,\n 4.x or later, and the cluster is being created in an Amazon VPC.

\n

If you enable in-transit encryption, you must also specify a value for\n CacheSubnetGroup.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or\n later.

\n

Default: false\n

\n \n

For HIPAA compliance, you must specify TransitEncryptionEnabled as\n true, an AuthToken, and a\n CacheSubnetGroup.

\n
" } }, "AtRestEncryptionEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables encryption at rest when set to true.

\n

You cannot modify the value of AtRestEncryptionEnabled after the\n replication group is created. To enable encryption at rest on a replication group you\n must set AtRestEncryptionEnabled to true when you create the\n replication group.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6, 4.x or\n later.

\n

Default: false\n

" + "smithy.api#documentation": "

A flag that enables encryption at rest when set to true.

\n

You cannot modify the value of AtRestEncryptionEnabled after the\n replication group is created. To enable encryption at rest on a replication group you\n must set AtRestEncryptionEnabled to true when you create the\n replication group.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or\n later.

\n

Default: false\n

" } }, "KmsKeyId": { @@ -4435,31 +4435,31 @@ "NetworkType": { "target": "com.amazonaws.elasticache#NetworkType", "traits": { - "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" } }, "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type you choose when creating a replication group, either\n ipv4 | ipv6. IPv6 is supported for workloads using Redis\n engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on\n the Nitro system.

" + "smithy.api#documentation": "

The network type you choose when creating a replication group, either\n ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS\n engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on\n the Nitro system.

" } }, "TransitEncryptionMode": { "target": "com.amazonaws.elasticache#TransitEncryptionMode", "traits": { - "smithy.api#documentation": "

A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.

\n

When setting TransitEncryptionEnabled to true, you can set\n your TransitEncryptionMode to preferred in the same request,\n to allow both encrypted and unencrypted connections at the same time. Once you migrate\n all your Redis clients to use encrypted connections you can modify the value to\n required to allow encrypted connections only.

\n

Setting TransitEncryptionMode to required is a two-step\n process that requires you to first set the TransitEncryptionMode to\n preferred, after that you can set TransitEncryptionMode to\n required.

\n

This process will not trigger the replacement of the replication group.

" + "smithy.api#documentation": "

A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.

\n

When setting TransitEncryptionEnabled to true, you can set\n your TransitEncryptionMode to preferred in the same request,\n to allow both encrypted and unencrypted connections at the same time. Once you migrate\n all your Redis OSS clients to use encrypted connections you can modify the value to\n required to allow encrypted connections only.

\n

Setting TransitEncryptionMode to required is a two-step\n process that requires you to first set the TransitEncryptionMode to\n preferred, after that you can set TransitEncryptionMode to\n required.

\n

This process will not trigger the replacement of the replication group.

" } }, "ClusterMode": { "target": "com.amazonaws.elasticache#ClusterMode", "traits": { - "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" + "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS \n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" } }, "ServerlessCacheSnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The name of the snapshot used to create a replication group. Available for Redis only.

" + "smithy.api#documentation": "

The name of the snapshot used to create a replication group. Available for Redis OSS only.

" } } }, @@ -4578,7 +4578,7 @@ "SnapshotArnsToRestore": { "target": "com.amazonaws.elasticache#SnapshotArnsList", "traits": { - "smithy.api#documentation": "

The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis only.

" + "smithy.api#documentation": "

The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis OSS and Serverless Memcached only.

" } }, "Tags": { @@ -4590,7 +4590,7 @@ "UserGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. Default is NULL.

" + "smithy.api#documentation": "

The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL.

" } }, "SubnetIds": { @@ -4602,13 +4602,13 @@ "SnapshotRetentionLimit": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of snapshots that will be retained for the serverless cache that is being created. \n As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis only.

" + "smithy.api#documentation": "

The number of snapshots that will be retained for the serverless cache that is being created. \n As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis OSS and Serverless Memcached only.

" } }, "DailySnapshotTime": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The daily time that snapshots will be created from the new serverless cache. By default this number is populated with \n 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis only.

" + "smithy.api#documentation": "

The daily time that snapshots will be created from the new serverless cache. By default this number is populated with \n 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -4665,7 +4665,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis only.

" + "smithy.api#documentation": "

This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis OSS and Serverless Memcached only.

" } }, "com.amazonaws.elasticache#CreateServerlessCacheSnapshotRequest": { @@ -4675,7 +4675,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name for the snapshot being created. Must be unique for the customer account. Available for Redis only.\n Must be between 1 and 255 characters.

", + "smithy.api#documentation": "

The name for the snapshot being created. Must be unique for the customer account. Available for Redis OSS and Serverless Memcached only.\n Must be between 1 and 255 characters.

", "smithy.api#required": {} } }, @@ -4683,20 +4683,20 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis only.

", + "smithy.api#documentation": "

The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis OSS and Serverless Memcached only.

", "smithy.api#required": {} } }, "KmsKeyId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The ID of the KMS key used to encrypt the snapshot. Available for Redis only. Default: NULL

" + "smithy.api#documentation": "

The ID of the KMS key used to encrypt the snapshot. Available for Redis OSS and Serverless Memcached only. Default: NULL

" } }, "Tags": { "target": "com.amazonaws.elasticache#TagList", "traits": { - "smithy.api#documentation": "

A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis only.

" + "smithy.api#documentation": "

A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -4710,7 +4710,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "

The state of a serverless cache snapshot at a specific point in time, to the millisecond. Available for Redis only.

" + "smithy.api#documentation": "

The state of a serverless cache snapshot at a specific point in time, to the millisecond. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -4759,7 +4759,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a copy of an entire cluster or replication group at a specific moment in\n time.

\n \n

This operation is valid for Redis only.

\n
", + "smithy.api#documentation": "

Creates a copy of an entire cluster or replication group at a specific moment in\n time.

\n \n

This operation is valid for Redis OSS only.

\n
", "smithy.api#examples": [ { "title": "CreateSnapshot - NonClustered Redis, 2 read-replicas", @@ -4962,7 +4962,7 @@ } ], "traits": { - "smithy.api#documentation": "

For Redis engine version 6.0 onwards: Creates a Redis user. For more information, see\n Using Role Based Access Control (RBAC).

" + "smithy.api#documentation": "

For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user. For more information, see\n Using Role Based Access Control (RBAC).

" } }, "com.amazonaws.elasticache#CreateUserGroup": { @@ -5000,7 +5000,7 @@ } ], "traits": { - "smithy.api#documentation": "

For Redis engine version 6.0 onwards: Creates a Redis user group. For more\n information, see Using Role Based Access Control (RBAC)\n

" + "smithy.api#documentation": "

For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user group. For more\n information, see Using Role Based Access Control (RBAC)\n

" } }, "com.amazonaws.elasticache#CreateUserGroupMessage": { @@ -5018,7 +5018,7 @@ "target": "com.amazonaws.elasticache#EngineType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The current supported value is Redis.

", + "smithy.api#documentation": "

The current supported value is Redis user.

", "smithy.api#required": {} } }, @@ -5031,7 +5031,7 @@ "Tags": { "target": "com.amazonaws.elasticache#TagList", "traits": { - "smithy.api#documentation": "

A list of tags to be added to this resource. A tag is a key-value pair. A tag key must\n be accompanied by a tag value, although null is accepted. Available for Redis only.

" + "smithy.api#documentation": "

A list of tags to be added to this resource. A tag is a key-value pair. A tag key must\n be accompanied by a tag value, although null is accepted. Available for Redis OSS only.

" } } }, @@ -5233,13 +5233,13 @@ "GlobalNodeGroupsToRemove": { "target": "com.amazonaws.elasticache#GlobalNodeGroupIdList", "traits": { - "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster.\n ElastiCache for Redis will attempt to remove all node groups listed by\n GlobalNodeGroupsToRemove from the cluster.

" + "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster.\n ElastiCache (Redis OSS) will attempt to remove all node groups listed by\n GlobalNodeGroupsToRemove from the cluster.

" } }, "GlobalNodeGroupsToRetain": { "target": "com.amazonaws.elasticache#GlobalNodeGroupIdList", "traits": { - "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster.\n ElastiCache for Redis will attempt to retain all node groups listed by\n GlobalNodeGroupsToRetain from the cluster.

" + "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster.\n ElastiCache (Redis OSS) will attempt to retain all node groups listed by\n GlobalNodeGroupsToRetain from the cluster.

" } }, "ApplyImmediately": { @@ -5313,7 +5313,7 @@ } ], "traits": { - "smithy.api#documentation": "

Dynamically decreases the number of replicas in a Redis (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Redis (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.

" + "smithy.api#documentation": "

Dynamically decreases the number of replicas in a Redis OSS (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Redis OSS (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.

" } }, "com.amazonaws.elasticache#DecreaseReplicaCountMessage": { @@ -5330,13 +5330,13 @@ "NewReplicaCount": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of read replica nodes you want at the completion of this operation. For\n Redis (cluster mode disabled) replication groups, this is the number of replica nodes in\n the replication group. For Redis (cluster mode enabled) replication groups, this is the\n number of replica nodes in each of the replication group's node groups.

\n

The minimum number of replicas in a shard or replication group is:

\n
    \n
  • \n

    Redis (cluster mode disabled)

    \n
      \n
    • \n

      If Multi-AZ is enabled: 1

      \n
    • \n
    • \n

      If Multi-AZ is not enabled: 0

      \n
    • \n
    \n
  • \n
  • \n

    Redis (cluster mode enabled): 0 (though you will not be able to failover to\n a replica if your primary node fails)

    \n
  • \n
" + "smithy.api#documentation": "

The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in\n the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the\n number of replica nodes in each of the replication group's node groups.

\n

The minimum number of replicas in a shard or replication group is:

\n
    \n
  • \n

    Redis OSS (cluster mode disabled)

    \n
      \n
    • \n

      If Multi-AZ is enabled: 1

      \n
    • \n
    • \n

      If Multi-AZ is not enabled: 0

      \n
    • \n
    \n
  • \n
  • \n

    Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to\n a replica if your primary node fails)

    \n
  • \n
" } }, "ReplicaConfiguration": { "target": "com.amazonaws.elasticache#ReplicaConfigurationList", "traits": { - "smithy.api#documentation": "

A list of ConfigureShard objects that can be used to configure each\n shard in a Redis (cluster mode enabled) replication group. The\n ConfigureShard has three members: NewReplicaCount,\n NodeGroupId, and PreferredAvailabilityZones.

" + "smithy.api#documentation": "

A list of ConfigureShard objects that can be used to configure each\n shard in a Redis OSS (cluster mode enabled) replication group. The\n ConfigureShard has three members: NewReplicaCount,\n NodeGroupId, and PreferredAvailabilityZones.

" } }, "ReplicasToRemove": { @@ -5435,7 +5435,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a previously provisioned cluster. DeleteCacheCluster deletes all\n associated cache nodes, node endpoints and the cluster itself. When you receive a\n successful response from this operation, Amazon ElastiCache immediately begins deleting\n the cluster; you cannot cancel or revert this operation.

\n

This operation is not valid for:

\n
    \n
  • \n

    Redis (cluster mode enabled) clusters

    \n
  • \n
  • \n

    Redis (cluster mode disabled) clusters

    \n
  • \n
  • \n

    A cluster that is the last read replica of a replication group

    \n
  • \n
  • \n

    A cluster that is the primary node of a replication group

    \n
  • \n
  • \n

    A node group (shard) that has Multi-AZ mode enabled

    \n
  • \n
  • \n

    A cluster from a Redis (cluster mode enabled) replication group

    \n
  • \n
  • \n

    A cluster that is not in the available state

    \n
  • \n
", + "smithy.api#documentation": "

Deletes a previously provisioned cluster. DeleteCacheCluster deletes all\n associated cache nodes, node endpoints and the cluster itself. When you receive a\n successful response from this operation, Amazon ElastiCache immediately begins deleting\n the cluster; you cannot cancel or revert this operation.

\n

This operation is not valid for:

\n
    \n
  • \n

    Redis OSS (cluster mode enabled) clusters

    \n
  • \n
  • \n

    Redis OSS (cluster mode disabled) clusters

    \n
  • \n
  • \n

    A cluster that is the last read replica of a replication group

    \n
  • \n
  • \n

    A cluster that is the primary node of a replication group

    \n
  • \n
  • \n

    A node group (shard) that has Multi-AZ mode enabled

    \n
  • \n
  • \n

    A cluster from a Redis OSS (cluster mode enabled) replication group

    \n
  • \n
  • \n

    A cluster that is not in the available state

    \n
  • \n
", "smithy.api#examples": [ { "title": "DeleteCacheCluster", @@ -5748,7 +5748,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an existing replication group. By default, this operation deletes the entire\n replication group, including the primary/primaries and all of the read replicas. If the\n replication group has only one primary, you can optionally delete only the read\n replicas, while retaining the primary by setting\n RetainPrimaryCluster=true.

\n

When you receive a successful response from this operation, Amazon ElastiCache\n immediately begins deleting the selected resources; you cannot cancel or revert this\n operation.

\n \n

This operation is valid for Redis only.

\n
", + "smithy.api#documentation": "

Deletes an existing replication group. By default, this operation deletes the entire\n replication group, including the primary/primaries and all of the read replicas. If the\n replication group has only one primary, you can optionally delete only the read\n replicas, while retaining the primary by setting\n RetainPrimaryCluster=true.

\n

When you receive a successful response from this operation, Amazon ElastiCache\n immediately begins deleting the selected resources; you cannot cancel or revert this\n operation.

\n \n
    \n
  • \n

    \n CreateSnapshot permission is required to create a final snapshot. \n Without this permission, the API call will fail with an Access Denied exception.

    \n
  • \n
  • \n

    This operation is valid for Redis OSS only.

    \n
  • \n
\n
", "smithy.api#examples": [ { "title": "DeleteReplicationGroup", @@ -5842,7 +5842,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a specified existing serverless cache.

" + "smithy.api#documentation": "

Deletes a specified existing serverless cache.

\n \n

\n CreateServerlessCacheSnapshot permission is required to create a final snapshot. \n Without this permission, the API call will fail with an Access Denied exception.

\n
" } }, "com.amazonaws.elasticache#DeleteServerlessCacheRequest": { @@ -5859,7 +5859,7 @@ "FinalSnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis only.\n Default: NULL, i.e. a final snapshot is not taken.

" + "smithy.api#documentation": "

Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis OSS and Serverless Memcached only.\n Default: NULL, i.e. a final snapshot is not taken.

" } } }, @@ -5904,7 +5904,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an existing serverless cache snapshot. Available for Redis only.

" + "smithy.api#documentation": "

Deletes an existing serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" } }, "com.amazonaws.elasticache#DeleteServerlessCacheSnapshotRequest": { @@ -5914,7 +5914,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Idenfitier of the snapshot to be deleted. Available for Redis only.

", + "smithy.api#documentation": "

Identifier of the snapshot to be deleted. Available for Redis OSS and Serverless Memcached only.

", "smithy.api#required": {} } } @@ -5929,7 +5929,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "

The snapshot to be deleted. Available for Redis only.

" + "smithy.api#documentation": "

The snapshot to be deleted. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -5960,7 +5960,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an existing snapshot. When you receive a successful response from this\n operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or\n revert this operation.

\n \n

This operation is valid for Redis only.

\n
", + "smithy.api#documentation": "

Deletes an existing snapshot. When you receive a successful response from this\n operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or\n revert this operation.

\n \n

This operation is valid for Redis OSS only.

\n
", "smithy.api#examples": [ { "title": "DeleteSnapshot", @@ -6056,7 +6056,7 @@ } ], "traits": { - "smithy.api#documentation": "

For Redis engine version 6.0 onwards: Deletes a user. The user will be removed from\n all user groups and in turn removed from all replication groups. For more information,\n see Using Role Based Access Control (RBAC).

" + "smithy.api#documentation": "

For Redis OSS engine version 6.0 onwards: Deletes a user. The user will be removed from\n all user groups and in turn removed from all replication groups. For more information,\n see Using Role Based Access Control (RBAC).

" } }, "com.amazonaws.elasticache#DeleteUserGroup": { @@ -6082,7 +6082,7 @@ } ], "traits": { - "smithy.api#documentation": "

For Redis engine version 6.0 onwards: Deletes a user group. The user group must first\n be disassociated from the replication group before it can be deleted. For more\n information, see Using Role Based Access Control (RBAC).

" + "smithy.api#documentation": "

For Redis OSS engine version 6.0 onwards: Deletes a user group. The user group must first\n be disassociated from the replication group before it can be deleted. For more\n information, see Using Role Based Access Control (RBAC).

" } }, "com.amazonaws.elasticache#DeleteUserGroupMessage": { @@ -6418,7 +6418,7 @@ "ShowCacheClustersNotInReplicationGroups": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

An optional flag that can be included in the DescribeCacheCluster request\n to show only nodes (API/CLI: clusters) that are not members of a replication group. In\n practice, this mean Memcached and single node Redis clusters.

" + "smithy.api#documentation": "

An optional flag that can be included in the DescribeCacheCluster request\n to show only nodes (API/CLI: clusters) that are not members of a replication group. In\n practice, this means Memcached and single node Redis OSS clusters.

" } } }, @@ -8311,7 +8311,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns information about a particular replication group. If no identifier is\n specified, DescribeReplicationGroups returns information about all\n replication groups.

\n \n

This operation is valid for Redis only.

\n
", + "smithy.api#documentation": "

Returns information about a particular replication group. If no identifier is\n specified, DescribeReplicationGroups returns information about all\n replication groups.

\n \n

This operation is valid for Redis OSS only.

\n
", "smithy.api#examples": [ { "title": "DescribeReplicationGroups", @@ -8521,7 +8521,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The cache node type filter value. Use this parameter to show only those reservations\n matching the specified cache node type.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis configuration variables appendonly and\n appendfsync are not supported on Redis version 2.8.22 and\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The cache node type filter value. Use this parameter to show only those reservations\n matching the specified cache node type.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis OSS Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis OSS configuration variables appendonly and\n appendfsync are not supported on Redis OSS version 2.8.22 and\n later.

    \n
  • \n
" } }, "Duration": { @@ -8902,7 +8902,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The cache node type filter value. Use this parameter to show only the available\n offerings matching the specified cache node type.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis configuration variables appendonly and\n appendfsync are not supported on Redis version 2.8.22 and\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The cache node type filter value. Use this parameter to show only the available\n offerings matching the specified cache node type.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis OSS Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis OSS configuration variables appendonly and\n appendfsync are not supported on Redis OSS version 2.8.22 and\n later.

    \n
  • \n
" } }, "Duration": { @@ -8964,7 +8964,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns information about serverless cache snapshots. \n By default, this API lists all of the customer’s serverless cache snapshots. \n It can also describe a single serverless cache snapshot, or the snapshots associated with \n a particular serverless cache. Available for Redis only.

", + "smithy.api#documentation": "

Returns information about serverless cache snapshots. \n By default, this API lists all of the customer’s serverless cache snapshots. \n It can also describe a single serverless cache snapshot, or the snapshots associated with \n a particular serverless cache. Available for Redis OSS and Serverless Memcached only.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -8979,31 +8979,31 @@ "ServerlessCacheName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of serverless cache. If this parameter is specified, \n only snapshots associated with that specific serverless cache are described. Available for Redis only.

" + "smithy.api#documentation": "

The identifier of serverless cache. If this parameter is specified, \n only snapshots associated with that specific serverless cache are described. Available for Redis OSS and Serverless Memcached only.

" } }, "ServerlessCacheSnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of the serverless cache’s snapshot.\n If this parameter is specified, only this snapshot is described. Available for Redis only.

" + "smithy.api#documentation": "

The identifier of the serverless cache’s snapshot.\n If this parameter is specified, only this snapshot is described. Available for Redis OSS and Serverless Memcached only.

" } }, "SnapshotType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The type of snapshot that is being described. Available for Redis only.

" + "smithy.api#documentation": "

The type of snapshot that is being described. Available for Redis OSS and Serverless Memcached only.

" } }, "NextToken": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Redis only.

" + "smithy.api#documentation": "

An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only.

" } }, "MaxResults": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The maximum number of records to include in the response. If more records exist than \n the specified max-results value, a market is included in the response so that remaining results \n can be retrieved. Available for Redis only.The default is 50. The Validation Constraints are a maximum of 50.

" + "smithy.api#documentation": "

The maximum number of records to include in the response. If more records exist than \n the specified max-results value, a market is included in the response so that remaining results \n can be retrieved. Available for Redis OSS and Serverless Memcached only.The default is 50. The Validation Constraints are a maximum of 50.

" } } }, @@ -9017,13 +9017,13 @@ "NextToken": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Redis only.

" + "smithy.api#documentation": "

An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only.

" } }, "ServerlessCacheSnapshots": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshotList", "traits": { - "smithy.api#documentation": "

The serverless caches snapshots associated with a given description request. Available for Redis only.

" + "smithy.api#documentation": "

The serverless caches snapshots associated with a given description request. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -9190,7 +9190,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns information about cluster or replication group snapshots. By default,\n DescribeSnapshots lists all of your snapshots; it can optionally\n describe a single snapshot, or just the snapshots associated with a particular cache\n cluster.

\n \n

This operation is valid for Redis only.

\n
", + "smithy.api#documentation": "

Returns information about cluster or replication group snapshots. By default,\n DescribeSnapshots lists all of your snapshots; it can optionally\n describe a single snapshot, or just the snapshots associated with a particular cache\n cluster.

\n \n

This operation is valid for Redis OSS only.

\n
", "smithy.api#examples": [ { "title": "DescribeSnapshots", @@ -9363,7 +9363,7 @@ "Engine": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache engine to which the update applies. Either Redis or Memcached

" + "smithy.api#documentation": "

The Elasticache engine to which the update applies. Either Redis OSS or Memcached.

" } }, "ServiceUpdateStatus": { @@ -9517,7 +9517,7 @@ "Engine": { "target": "com.amazonaws.elasticache#EngineType", "traits": { - "smithy.api#documentation": "

The Redis engine.

" + "smithy.api#documentation": "

The Redis OSS engine.

" } }, "UserId": { @@ -9896,7 +9896,7 @@ } ], "traits": { - "smithy.api#documentation": "

Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis only.

" + "smithy.api#documentation": "

Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis OSS only.

" } }, "com.amazonaws.elasticache#ExportServerlessCacheSnapshotRequest": { @@ -9906,7 +9906,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The identifier of the serverless cache snapshot to be exported to S3. Available for Redis only.

", + "smithy.api#documentation": "

The identifier of the serverless cache snapshot to be exported to S3. Available for Redis OSS only.

", "smithy.api#required": {} } }, @@ -9914,7 +9914,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region \n as the snapshot. Available for Redis only.

", + "smithy.api#documentation": "

Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region \n as the snapshot. Available for Redis OSS only.

", "smithy.api#required": {} } } @@ -9929,7 +9929,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "

The state of a serverless cache at a specific point in time, to the millisecond. Available for Redis only.

" + "smithy.api#documentation": "

The state of a serverless cache at a specific point in time, to the millisecond. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -10127,13 +10127,13 @@ "Engine": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache engine. For Redis only.

" + "smithy.api#documentation": "

The Elasticache engine. For Redis OSS only.

" } }, "EngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache Redis engine version.

" + "smithy.api#documentation": "

The Elasticache (Redis OSS) engine version.

" } }, "Members": { @@ -10157,19 +10157,19 @@ "AuthTokenEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Redis\n commands.

\n

Default: false\n

" + "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Redis OSS \n commands.

\n

Default: false\n

" } }, "TransitEncryptionEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables in-transit encryption when set to true.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6, 4.x or\n later.

" + "smithy.api#documentation": "

A flag that enables in-transit encryption when set to true.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or\n later.

" } }, "AtRestEncryptionEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables encryption at rest when set to true.

\n

You cannot modify the value of AtRestEncryptionEnabled after the\n replication group is created. To enable encryption at rest on a replication group you\n must set AtRestEncryptionEnabled to true when you create the\n replication group.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6, 4.x or\n later.

" + "smithy.api#documentation": "

A flag that enables encryption at rest when set to true.

\n

You cannot modify the value of AtRestEncryptionEnabled after the\n replication group is created. To enable encryption at rest on a replication group you\n must set AtRestEncryptionEnabled to true when you create the\n replication group.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or\n later.

" } }, "ARN": { @@ -10412,7 +10412,7 @@ } ], "traits": { - "smithy.api#documentation": "

Dynamically increases the number of replicas in a Redis (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Redis (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.

" + "smithy.api#documentation": "

Dynamically increases the number of replicas in a Redis OSS (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Redis OSS (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.

" } }, "com.amazonaws.elasticache#IncreaseReplicaCountMessage": { @@ -10429,13 +10429,13 @@ "NewReplicaCount": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of read replica nodes you want at the completion of this operation. For\n Redis (cluster mode disabled) replication groups, this is the number of replica nodes in\n the replication group. For Redis (cluster mode enabled) replication groups, this is the\n number of replica nodes in each of the replication group's node groups.

" + "smithy.api#documentation": "

The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in\n the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the\n number of replica nodes in each of the replication group's node groups.

" } }, "ReplicaConfiguration": { "target": "com.amazonaws.elasticache#ReplicaConfigurationList", "traits": { - "smithy.api#documentation": "

A list of ConfigureShard objects that can be used to configure each\n shard in a Redis (cluster mode enabled) replication group. The\n ConfigureShard has three members: NewReplicaCount,\n NodeGroupId, and PreferredAvailabilityZones.

" + "smithy.api#documentation": "

A list of ConfigureShard objects that can be used to configure each\n shard in a Redis OSS (cluster mode enabled) replication group. The\n ConfigureShard has three members: NewReplicaCount,\n NodeGroupId, and PreferredAvailabilityZones.

" } }, "ApplyImmediately": { @@ -10696,7 +10696,7 @@ "code": "InvalidServerlessCacheSnapshotStateFault", "httpResponseCode": 400 }, - "smithy.api#documentation": "

The state of the serverless cache snapshot was not received. Available for Redis only.

", + "smithy.api#documentation": "

The state of the serverless cache snapshot was not received. Available for Redis OSS and Serverless Memcached only.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -10863,7 +10863,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists all available node types that you can scale your Redis cluster's or replication\n group's current node type.

\n

When you use the ModifyCacheCluster or\n ModifyReplicationGroup operations to scale your cluster or replication\n group, the value of the CacheNodeType parameter must be one of the node\n types returned by this operation.

", + "smithy.api#documentation": "

Lists all available node types that you can scale your Redis OSS cluster's or replication\n group's current node type.

\n

When you use the ModifyCacheCluster or\n ModifyReplicationGroup operations to scale your cluster or replication\n group, the value of the CacheNodeType parameter must be one of the node\n types returned by this operation.

", "smithy.api#examples": [ { "title": "ListAllowedNodeTypeModifications", @@ -11281,7 +11281,7 @@ "NumCacheNodes": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of cache nodes that the cluster should have. If the value for\n NumCacheNodes is greater than the sum of the number of current cache\n nodes and the number of cache nodes pending creation (which may be zero), more nodes are\n added. If the value is less than the number of existing cache nodes, nodes are removed.\n If the value is equal to the number of current cache nodes, any pending add or remove\n requests are canceled.

\n

If you are removing cache nodes, you must use the CacheNodeIdsToRemove\n parameter to provide the IDs of the specific cache nodes to remove.

\n

For clusters running Redis, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

\n \n

Adding or removing Memcached cache nodes can be applied immediately or as a\n pending operation (see ApplyImmediately).

\n

A pending operation to modify the number of cache nodes in a cluster during its\n maintenance window, whether by adding or removing nodes in accordance with the scale\n out architecture, is not queued. The customer's latest request to add or remove\n nodes to the cluster overrides any previous pending operations to modify the number\n of cache nodes in the cluster. For example, a request to remove 2 nodes would\n override a previous pending operation to remove 3 nodes. Similarly, a request to add\n 2 nodes would override a previous pending operation to remove 3 nodes and vice\n versa. As Memcached cache nodes may now be provisioned in different Availability\n Zones with flexible cache node placement, a request to add nodes does not\n automatically override a previous pending operation to add nodes. The customer can\n modify the previous pending operation to add more nodes or explicitly cancel the\n pending request and retry the new request. To cancel pending operations to modify\n the number of cache nodes in a cluster, use the ModifyCacheCluster\n request and set NumCacheNodes equal to the number of cache nodes\n currently in the cluster.

\n
" + "smithy.api#documentation": "

The number of cache nodes that the cluster should have. If the value for\n NumCacheNodes is greater than the sum of the number of current cache\n nodes and the number of cache nodes pending creation (which may be zero), more nodes are\n added. If the value is less than the number of existing cache nodes, nodes are removed.\n If the value is equal to the number of current cache nodes, any pending add or remove\n requests are canceled.

\n

If you are removing cache nodes, you must use the CacheNodeIdsToRemove\n parameter to provide the IDs of the specific cache nodes to remove.

\n

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

\n \n

Adding or removing Memcached cache nodes can be applied immediately or as a\n pending operation (see ApplyImmediately).

\n

A pending operation to modify the number of cache nodes in a cluster during its\n maintenance window, whether by adding or removing nodes in accordance with the scale\n out architecture, is not queued. The customer's latest request to add or remove\n nodes to the cluster overrides any previous pending operations to modify the number\n of cache nodes in the cluster. For example, a request to remove 2 nodes would\n override a previous pending operation to remove 3 nodes. Similarly, a request to add\n 2 nodes would override a previous pending operation to remove 3 nodes and vice\n versa. As Memcached cache nodes may now be provisioned in different Availability\n Zones with flexible cache node placement, a request to add nodes does not\n automatically override a previous pending operation to add nodes. The customer can\n modify the previous pending operation to add more nodes or explicitly cancel the\n pending request and retry the new request. To cancel pending operations to modify\n the number of cache nodes in a cluster, use the ModifyCacheCluster\n request and set NumCacheNodes equal to the number of cache nodes\n currently in the cluster.

\n
" } }, "CacheNodeIdsToRemove": { @@ -11353,7 +11353,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" + "smithy.api#documentation": "

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" } }, "SnapshotRetentionLimit": { @@ -11383,7 +11383,7 @@ "AuthTokenUpdateStrategy": { "target": "com.amazonaws.elasticache#AuthTokenUpdateStrategyType", "traits": { - "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    ROTATE - default, if no update strategy is provided

    \n
  • \n
  • \n

    SET - allowed only after ROTATE

    \n
  • \n
  • \n

    DELETE - allowed only when transitioning to RBAC

    \n
  • \n
\n

For more information, see Authenticating Users with Redis AUTH\n

" + "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    ROTATE - default, if no update strategy is provided

    \n
  • \n
  • \n

    SET - allowed only after ROTATE

    \n
  • \n
  • \n

    DELETE - allowed only when transitioning to RBAC

    \n
  • \n
\n

For more information, see Authenticating Users with Redis OSS AUTH\n

" } }, "LogDeliveryConfigurations": { @@ -11395,7 +11395,7 @@ "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } } }, @@ -11758,7 +11758,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies the settings for a replication group. This is limited to Redis 7 and newer.

\n \n \n

This operation is valid for Redis only.

\n
", + "smithy.api#documentation": "

Modifies the settings for a replication group. This is limited to Redis OSS 7 and newer.

\n \n \n

This operation is valid for Redis OSS only.

\n
", "smithy.api#examples": [ { "title": "ModifyReplicationGroup", @@ -11857,7 +11857,7 @@ "SnapshottingClusterId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The cluster ID that is used as the daily snapshot source for the replication group.\n This parameter cannot be set for Redis (cluster mode enabled) replication groups.

" + "smithy.api#documentation": "

The cluster ID that is used as the daily snapshot source for the replication group.\n This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.

" } }, "AutomaticFailoverEnabled": { @@ -11930,7 +11930,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" + "smithy.api#documentation": "

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" } }, "SnapshotRetentionLimit": { @@ -11960,7 +11960,7 @@ "AuthTokenUpdateStrategy": { "target": "com.amazonaws.elasticache#AuthTokenUpdateStrategyType", "traits": { - "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    ROTATE - default, if no update strategy is provided

    \n
  • \n
  • \n

    SET - allowed only after ROTATE

    \n
  • \n
  • \n

    DELETE - allowed only when transitioning to RBAC

    \n
  • \n
\n

For more information, see Authenticating Users with Redis AUTH\n

" + "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    ROTATE - default, if no update strategy is provided

    \n
  • \n
  • \n

    SET - allowed only after ROTATE

    \n
  • \n
  • \n

    DELETE - allowed only when transitioning to RBAC

    \n
  • \n
\n

For more information, see Authenticating Users with Redis OSS AUTH\n

" } }, "UserGroupIdsToAdd": { @@ -11990,7 +11990,7 @@ "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } }, "TransitEncryptionEnabled": { @@ -12002,13 +12002,13 @@ "TransitEncryptionMode": { "target": "com.amazonaws.elasticache#TransitEncryptionMode", "traits": { - "smithy.api#documentation": "

A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.

\n

You must set TransitEncryptionEnabled to true, for your\n existing cluster, and set TransitEncryptionMode to preferred\n in the same request to allow both encrypted and unencrypted connections at the same\n time. Once you migrate all your Redis clients to use encrypted connections you can set\n the value to required to allow encrypted connections only.

\n

Setting TransitEncryptionMode to required is a two-step\n process that requires you to first set the TransitEncryptionMode to\n preferred, after that you can set TransitEncryptionMode to\n required.

" + "smithy.api#documentation": "

A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.

\n

You must set TransitEncryptionEnabled to true, for your\n existing cluster, and set TransitEncryptionMode to preferred\n in the same request to allow both encrypted and unencrypted connections at the same\n time. Once you migrate all your Redis OSS clients to use encrypted connections you can set\n the value to required to allow encrypted connections only.

\n

Setting TransitEncryptionMode to required is a two-step\n process that requires you to first set the TransitEncryptionMode to\n preferred, after that you can set TransitEncryptionMode to\n required.

" } }, "ClusterMode": { "target": "com.amazonaws.elasticache#ClusterMode", "traits": { - "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" + "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" } } }, @@ -12079,7 +12079,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the Redis (cluster mode enabled) cluster (replication group) on which the\n shards are to be configured.

", + "smithy.api#documentation": "

The name of the Redis OSS (cluster mode enabled) cluster (replication group) on which the\n shards are to be configured.

", "smithy.api#required": {} } }, @@ -12108,13 +12108,13 @@ "NodeGroupsToRemove": { "target": "com.amazonaws.elasticache#NodeGroupsToRemoveList", "traits": { - "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node\n groups (shards), then either NodeGroupsToRemove or\n NodeGroupsToRetain is required. NodeGroupsToRemove is a\n list of NodeGroupIds to remove from the cluster.

\n

ElastiCache for Redis will attempt to remove all node groups listed by\n NodeGroupsToRemove from the cluster.

" + "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node\n groups (shards), then either NodeGroupsToRemove or\n NodeGroupsToRetain is required. NodeGroupsToRemove is a\n list of NodeGroupIds to remove from the cluster.

\n

ElastiCache (Redis OSS) will attempt to remove all node groups listed by\n NodeGroupsToRemove from the cluster.

" } }, "NodeGroupsToRetain": { "target": "com.amazonaws.elasticache#NodeGroupsToRetainList", "traits": { - "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node\n groups (shards), then either NodeGroupsToRemove or\n NodeGroupsToRetain is required. NodeGroupsToRetain is a\n list of NodeGroupIds to retain in the cluster.

\n

ElastiCache for Redis will attempt to remove all node groups except those listed by\n NodeGroupsToRetain from the cluster.

" + "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node\n groups (shards), then either NodeGroupsToRemove or\n NodeGroupsToRetain is required. NodeGroupsToRetain is a\n list of NodeGroupIds to retain in the cluster.

\n

ElastiCache (Redis OSS) will attempt to remove all node groups except those listed by\n NodeGroupsToRetain from the cluster.

" } } }, @@ -12198,13 +12198,13 @@ "RemoveUserGroup": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

The identifier of the UserGroup to be removed from association with the Redis serverless cache. Available for Redis only. Default is NULL.

" + "smithy.api#documentation": "

The identifier of the UserGroup to be removed from association with the Redis OSS serverless cache. Available for Redis OSS only. Default is NULL.

" } }, "UserGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. \n Default is NULL - the existing UserGroup is not removed.

" + "smithy.api#documentation": "

The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. \n Default is NULL - the existing UserGroup is not removed.

" } }, "SecurityGroupIds": { @@ -12216,13 +12216,13 @@ "SnapshotRetentionLimit": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of days for which Elasticache retains automatic snapshots before deleting them. \n Available for Redis only.\n Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. \n The maximum value allowed is 35 days.

" + "smithy.api#documentation": "

The number of days for which Elasticache retains automatic snapshots before deleting them. \n Available for Redis OSS and Serverless Memcached only.\n Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. \n The maximum value allowed is 35 days.

" } }, "DailySnapshotTime": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis only.\n The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed.

" + "smithy.api#documentation": "

The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis OSS and Serverless Memcached only.\n The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed.

" } } }, @@ -12454,7 +12454,7 @@ "NodeGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier for the node group (shard). A Redis (cluster mode disabled) replication\n group contains only 1 node group; therefore, the node group ID is 0001. A Redis (cluster\n mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090.\n Optionally, the user can provide the id for a node group.

" + "smithy.api#documentation": "

The identifier for the node group (shard). A Redis OSS (cluster mode disabled) replication\n group contains only 1 node group; therefore, the node group ID is 0001. A Redis OSS (cluster mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090.\n Optionally, the user can provide the id for a node group.

" } }, "Status": { @@ -12498,7 +12498,7 @@ "NodeGroupId": { "target": "com.amazonaws.elasticache#AllowedNodeGroupId", "traits": { - "smithy.api#documentation": "

Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the\n node group these configuration values apply to.

" + "smithy.api#documentation": "

Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the\n node group these configuration values apply to.

" } }, "Slots": { @@ -12578,7 +12578,7 @@ "ReadEndpoint": { "target": "com.amazonaws.elasticache#Endpoint", "traits": { - "smithy.api#documentation": "

The information required for client programs to connect to a node for read operations.\n The read endpoint is only applicable on Redis (cluster mode disabled) clusters.

" + "smithy.api#documentation": "

The information required for client programs to connect to a node for read operations.\n The read endpoint is only applicable on Redis OSS (cluster mode disabled) clusters.

" } }, "PreferredAvailabilityZone": { @@ -12596,7 +12596,7 @@ "CurrentRole": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The role that is currently assigned to the node - primary or\n replica. This member is only applicable for Redis (cluster mode\n disabled) replication groups.

" + "smithy.api#documentation": "

The role that is currently assigned to the node - primary or\n replica. This member is only applicable for Redis OSS (cluster mode\n disabled) replication groups.

" } } }, @@ -13140,7 +13140,7 @@ "NumCacheNodes": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The new number of cache nodes for the cluster.

\n

For clusters running Redis, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

" + "smithy.api#documentation": "

The new number of cache nodes for the cluster.

\n

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

" } }, "CacheNodeIdsToRemove": { @@ -13232,7 +13232,7 @@ "UpdateActionStatus": { "target": "com.amazonaws.elasticache#UpdateActionStatus", "traits": { - "smithy.api#documentation": "

The status of the update action on the Redis cluster

" + "smithy.api#documentation": "

The status of the update action on the Redis OSS cluster

" } } }, @@ -13278,7 +13278,7 @@ } ], "traits": { - "smithy.api#documentation": "

Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible\n for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis or Managing Costs with\n Reserved Nodes for Memcached.

", + "smithy.api#documentation": "

Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible\n for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis OSS or Managing Costs with\n Reserved Nodes for Memcached.

", "smithy.api#examples": [ { "title": "PurchaseReservedCacheNodesOfferings", @@ -13411,7 +13411,7 @@ } ], "traits": { - "smithy.api#documentation": "

Reboots some, or all, of the cache nodes within a provisioned cluster. This operation\n applies any modified cache parameter groups to the cluster. The reboot operation takes\n place as soon as possible, and results in a momentary outage to the cluster. During the\n reboot, the cluster status is set to REBOOTING.

\n

The reboot causes the contents of the cache (for each cache node being rebooted) to be\n lost.

\n

When the reboot is complete, a cluster event is created.

\n

Rebooting a cluster is currently supported on Memcached and Redis (cluster mode\n disabled) clusters. Rebooting is not supported on Redis (cluster mode enabled)\n clusters.

\n

If you make changes to parameters that require a Redis (cluster mode enabled) cluster\n reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.

", + "smithy.api#documentation": "

Reboots some, or all, of the cache nodes within a provisioned cluster. This operation\n applies any modified cache parameter groups to the cluster. The reboot operation takes\n place as soon as possible, and results in a momentary outage to the cluster. During the\n reboot, the cluster status is set to REBOOTING.

\n

The reboot causes the contents of the cache (for each cache node being rebooted) to be\n lost.

\n

When the reboot is complete, a cluster event is created.

\n

Rebooting a cluster is currently supported on Memcached and Redis OSS (cluster mode\n disabled) clusters. Rebooting is not supported on Redis OSS (cluster mode enabled)\n clusters.

\n

If you make changes to parameters that require a Redis OSS (cluster mode enabled) cluster\n reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.

", "smithy.api#examples": [ { "title": "RebootCacheCluster", @@ -13742,7 +13742,7 @@ "NodeGroups": { "target": "com.amazonaws.elasticache#NodeGroupList", "traits": { - "smithy.api#documentation": "

A list of node groups in this replication group. For Redis (cluster mode disabled)\n replication groups, this is a single-element list. For Redis (cluster mode enabled)\n replication groups, the list contains an entry for each node group (shard).

" + "smithy.api#documentation": "

A list of node groups in this replication group. For Redis OSS (cluster mode disabled)\n replication groups, this is a single-element list. For Redis OSS (cluster mode enabled)\n replication groups, the list contains an entry for each node group (shard).

" } }, "SnapshottingClusterId": { @@ -13754,7 +13754,7 @@ "AutomaticFailover": { "target": "com.amazonaws.elasticache#AutomaticFailoverStatus", "traits": { - "smithy.api#documentation": "

Indicates the status of automatic failover for this Redis replication group.

" + "smithy.api#documentation": "

Indicates the status of automatic failover for this Redis OSS replication group.

" } }, "MultiAZ": { @@ -13796,7 +13796,7 @@ "AuthTokenEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Redis\n commands.

\n

Default: false\n

" + "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Redis OSS \n commands.

\n

Default: false\n

" } }, "AuthTokenLastModifiedDate": { @@ -13808,13 +13808,13 @@ "TransitEncryptionEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables in-transit encryption when set to true.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6, 4.x or\n later.

\n

Default: false\n

" + "smithy.api#documentation": "

A flag that enables in-transit encryption when set to true.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or\n later.

\n

Default: false\n

" } }, "AtRestEncryptionEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables encryption at-rest when set to true.

\n

You cannot modify the value of AtRestEncryptionEnabled after the cluster\n is created. To enable encryption at-rest on a cluster you must set\n AtRestEncryptionEnabled to true when you create a\n cluster.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6, 4.x or\n later.

\n

Default: false\n

" + "smithy.api#documentation": "

A flag that enables encryption at-rest when set to true.

\n

You cannot modify the value of AtRestEncryptionEnabled after the cluster\n is created. To enable encryption at-rest on a cluster you must set\n AtRestEncryptionEnabled to true when you create a\n cluster.

\n

\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or\n later.

\n

Default: false\n

" } }, "MemberClustersOutpostArns": { @@ -13862,19 +13862,19 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#Boolean", "traits": { - "smithy.api#documentation": "

If you are running Redis engine version 6.0 or later, set this parameter to yes if you\n want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.

" + "smithy.api#documentation": "

If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you\n want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.

" } }, "NetworkType": { "target": "com.amazonaws.elasticache#NetworkType", "traits": { - "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" } }, "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } }, "TransitEncryptionMode": { @@ -13886,12 +13886,12 @@ "ClusterMode": { "target": "com.amazonaws.elasticache#ClusterMode", "traits": { - "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" + "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS \n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" } } }, "traits": { - "smithy.api#documentation": "

Contains all of the attributes of a specific Redis replication group.

" + "smithy.api#documentation": "

Contains all of the attributes of a specific Redis OSS replication group.

" } }, "com.amazonaws.elasticache#ReplicationGroupAlreadyExistsFault": { @@ -14025,7 +14025,7 @@ "AutomaticFailoverStatus": { "target": "com.amazonaws.elasticache#PendingAutomaticFailoverStatus", "traits": { - "smithy.api#documentation": "

Indicates the status of automatic failover for this Redis replication group.

" + "smithy.api#documentation": "

Indicates the status of automatic failover for this Redis OSS replication group.

" } }, "Resharding": { @@ -14067,12 +14067,12 @@ "ClusterMode": { "target": "com.amazonaws.elasticache#ClusterMode", "traits": { - "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" + "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" } } }, "traits": { - "smithy.api#documentation": "

The settings to be applied to the Redis replication group, either immediately or\n during the next maintenance window.

" + "smithy.api#documentation": "

The settings to be applied to the Redis OSS replication group, either immediately or\n during the next maintenance window.

" } }, "com.amazonaws.elasticache#ReservedCacheNode": { @@ -14093,7 +14093,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The cache node type for the reserved cache nodes.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis configuration variables appendonly and\n appendfsync are not supported on Redis version 2.8.22 and\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The cache node type for the reserved cache nodes.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis OSS Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis OSS configuration variables appendonly and\n appendfsync are not supported on Redis OSS version 2.8.22 and\n later.

    \n
  • \n
" } }, "StartTime": { @@ -14254,7 +14254,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The cache node type for the reserved cache node.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis configuration variables appendonly and\n appendfsync are not supported on Redis version 2.8.22 and\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The cache node type for the reserved cache node.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis OSS Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis OSS configuration variables appendonly and\n appendfsync are not supported on Redis OSS version 2.8.22 and\n later.

    \n
  • \n
" } }, "Duration": { @@ -14422,7 +14422,7 @@ "NodeGroupId": { "target": "com.amazonaws.elasticache#AllowedNodeGroupId", "traits": { - "smithy.api#documentation": "

Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the\n node group these configuration values apply to.

" + "smithy.api#documentation": "

Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the\n node group these configuration values apply to.

" } }, "PreferredAvailabilityZones": { @@ -14656,7 +14656,7 @@ "UserGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL.

" + "smithy.api#documentation": "

The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.

" } }, "SubnetIds": { @@ -14668,13 +14668,13 @@ "SnapshotRetentionLimit": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The current setting for the number of serverless cache snapshots the system will retain. Available for Redis only.

" + "smithy.api#documentation": "

The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.

" } }, "DailySnapshotTime": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a\n specific time on a daily basis. Available for Redis only.

" + "smithy.api#documentation": "

The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a\n specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -14771,60 +14771,60 @@ "ServerlessCacheSnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of a serverless cache snapshot. Available for Redis only.

" + "smithy.api#documentation": "

The identifier of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" } }, "ARN": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Redis only.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" } }, "KmsKeyId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Redis only.

" + "smithy.api#documentation": "

The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" } }, "SnapshotType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The type of snapshot of serverless cache. Available for Redis only.

" + "smithy.api#documentation": "

The type of snapshot of serverless cache. Available for Redis OSS and Serverless Memcached only.

" } }, "Status": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The current status of the serverless cache. Available for Redis only.

" + "smithy.api#documentation": "

The current status of the serverless cache. Available for Redis OSS and Serverless Memcached only.

" } }, "CreateTime": { "target": "com.amazonaws.elasticache#TStamp", "traits": { - "smithy.api#documentation": "

The date and time that the source serverless cache's metadata and cache data set was obtained for\n the snapshot. Available for Redis only.

" + "smithy.api#documentation": "

The date and time that the source serverless cache's metadata and cache data set was obtained for\n the snapshot. Available for Redis OSS and Serverless Memcached only.

" } }, "ExpiryTime": { "target": "com.amazonaws.elasticache#TStamp", "traits": { - "smithy.api#documentation": "

The time that the serverless cache snapshot will expire. Available for Redis only.

" + "smithy.api#documentation": "

The time that the serverless cache snapshot will expire. Available for Redis OSS and Serverless Memcached only.

" } }, "BytesUsedForCache": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The total size of a serverless cache snapshot, in bytes. Available for Redis only.

" + "smithy.api#documentation": "

The total size of a serverless cache snapshot, in bytes. Available for Redis OSS and Serverless Memcached only.

" } }, "ServerlessCacheConfiguration": { "target": "com.amazonaws.elasticache#ServerlessCacheConfiguration", "traits": { - "smithy.api#documentation": "

The configuration of the serverless cache, at the time the snapshot was taken. Available for Redis only.

" + "smithy.api#documentation": "

The configuration of the serverless cache, at the time the snapshot was taken. Available for Redis OSS and Serverless Memcached only.

" } } }, "traits": { - "smithy.api#documentation": "

The resource representing a serverless cache snapshot. Available for Redis only.

" + "smithy.api#documentation": "

The resource representing a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" } }, "com.amazonaws.elasticache#ServerlessCacheSnapshotAlreadyExistsFault": { @@ -14839,7 +14839,7 @@ "code": "ServerlessCacheSnapshotAlreadyExistsFault", "httpResponseCode": 400 }, - "smithy.api#documentation": "

A serverless cache snapshot with this name already exists. Available for Redis only.

", + "smithy.api#documentation": "

A serverless cache snapshot with this name already exists. Available for Redis OSS and Serverless Memcached only.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -14865,7 +14865,7 @@ "code": "ServerlessCacheSnapshotNotFoundFault", "httpResponseCode": 404 }, - "smithy.api#documentation": "

This serverless cache snapshot could not be found or does not exist. Available for Redis only.

", + "smithy.api#documentation": "

This serverless cache snapshot could not be found or does not exist. Available for Redis OSS and Serverless Memcached only.

", "smithy.api#error": "client", "smithy.api#httpError": 404 } @@ -14882,7 +14882,7 @@ "code": "ServerlessCacheSnapshotQuotaExceededFault", "httpResponseCode": 400 }, - "smithy.api#documentation": "

The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Redis only.

", + "smithy.api#documentation": "

The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Redis OSS and Serverless Memcached only.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -14958,13 +14958,13 @@ "Engine": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache engine to which the update applies. Either Redis or Memcached

" + "smithy.api#documentation": "

The Elasticache engine to which the update applies. Either Redis OSS or Memcached.

" } }, "EngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache engine version to which the update applies. Either Redis or Memcached\n engine version

" + "smithy.api#documentation": "

The Elasticache engine version to which the update applies. Either Redis OSS or Memcached\n engine version.

" } }, "AutoUpdateAfterRecommendedApplyByDate": { @@ -14981,7 +14981,7 @@ } }, "traits": { - "smithy.api#documentation": "

An update that you can apply to your Redis clusters.

" + "smithy.api#documentation": "

An update that you can apply to your Redis OSS clusters.

" } }, "com.amazonaws.elasticache#ServiceUpdateList": { @@ -15184,7 +15184,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The name of the compute and memory capacity node type for the source cluster.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis configuration variables appendonly and\n appendfsync are not supported on Redis version 2.8.22 and\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The name of the compute and memory capacity node type for the source cluster.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

      \n R4 node types:\n cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n M2 node types:\n cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

      \n

      \n R3 node types:\n cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

      \n
    • \n
    \n
  • \n
\n

\n Additional node type info\n

\n
    \n
  • \n

    All current generation instance types are created in Amazon VPC by\n default.

    \n
  • \n
  • \n

    Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

    \n
  • \n
  • \n

    Redis OSS Multi-AZ with automatic failover is not supported on T1\n instances.

    \n
  • \n
  • \n

    Redis OSS configuration variables appendonly and\n appendfsync are not supported on Redis OSS version 2.8.22 and\n later.

    \n
  • \n
" } }, "Engine": { @@ -15202,7 +15202,7 @@ "NumCacheNodes": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of cache nodes in the source cluster.

\n

For clusters running Redis, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

" + "smithy.api#documentation": "

The number of cache nodes in the source cluster.

\n

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

" } }, "PreferredAvailabilityZone": { @@ -15262,7 +15262,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#Boolean", "traits": { - "smithy.api#documentation": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" + "smithy.api#documentation": "

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" } }, "SnapshotRetentionLimit": { @@ -15286,7 +15286,7 @@ "AutomaticFailover": { "target": "com.amazonaws.elasticache#AutomaticFailoverStatus", "traits": { - "smithy.api#documentation": "

Indicates the status of automatic failover for the source Redis replication\n group.

" + "smithy.api#documentation": "

Indicates the status of automatic failover for the source Redis OSS replication\n group.

" } }, "NodeSnapshots": { @@ -15315,7 +15315,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents a copy of an entire Redis cluster as of the time when the snapshot was\n taken.

" + "smithy.api#documentation": "

Represents a copy of an entire Redis OSS cluster as of the time when the snapshot was\n taken.

" } }, "com.amazonaws.elasticache#SnapshotAlreadyExistsFault": { @@ -15356,7 +15356,7 @@ "code": "SnapshotFeatureNotSupportedFault", "httpResponseCode": 400 }, - "smithy.api#documentation": "

You attempted one of the following operations:

\n
    \n
  • \n

    Creating a snapshot of a Redis cluster running on a\n cache.t1.micro cache node.

    \n
  • \n
  • \n

    Creating a snapshot of a cluster that is running Memcached rather than\n Redis.

    \n
  • \n
\n

Neither of these are supported by ElastiCache.

", + "smithy.api#documentation": "

You attempted one of the following operations:

\n
    \n
  • \n

    Creating a snapshot of a Redis OSS cluster running on a\n cache.t1.micro cache node.

    \n
  • \n
  • \n

    Creating a snapshot of a cluster that is running Memcached rather than\n Redis OSS.

    \n
  • \n
\n

Neither of these are supported by ElastiCache.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -15504,7 +15504,7 @@ "target": "com.amazonaws.elasticache#CustomerNodeEndpointList", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

List of endpoints from which data should be migrated. For Redis (cluster mode\n disabled), list should have only one element.

", + "smithy.api#documentation": "

List of endpoints from which data should be migrated. For Redis OSS (cluster mode\n disabled), list should have only one element.

", "smithy.api#required": {} } } @@ -15551,7 +15551,7 @@ "SupportedNetworkTypes": { "target": "com.amazonaws.elasticache#NetworkTypeList", "traits": { - "smithy.api#documentation": "

Either ipv4 | ipv6 | dual_stack. IPv6 is\n supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

Either ipv4 | ipv6 | dual_stack. IPv6 is\n supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" } } }, @@ -15752,7 +15752,7 @@ } ], "traits": { - "smithy.api#documentation": "

Represents the input of a TestFailover operation which tests automatic\n failover on a specified node group (called shard in the console) in a replication group\n (called cluster in the console).

\n

This API is designed for testing the behavior of your application in case of\n ElastiCache failover. It is not designed to be an operational tool for initiating a\n failover to overcome a problem you may have with the cluster. Moreover, in certain\n conditions such as large-scale operational events, Amazon may block this API.

\n

\n Note the following\n

\n
    \n
  • \n

    A customer can use this operation to test automatic failover on up to 15 shards\n (called node groups in the ElastiCache API and Amazon CLI) in any rolling\n 24-hour period.

    \n
  • \n
  • \n

    If calling this operation on shards in different clusters (called replication\n groups in the API and CLI), the calls can be made concurrently.

    \n

    \n
  • \n
  • \n

    If calling this operation multiple times on different shards in the same Redis\n (cluster mode enabled) replication group, the first node replacement must\n complete before a subsequent call can be made.

    \n
  • \n
  • \n

    To determine whether the node replacement is complete you can check Events\n using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API.\n Look for the following automatic failover related events, listed here in order\n of occurrance:

    \n
      \n
    1. \n

      Replication group message: Test Failover API called for node\n group \n

      \n
    2. \n
    3. \n

      Cache cluster message: Failover from primary node\n to replica node \n completed\n

      \n
    4. \n
    5. \n

      Replication group message: Failover from primary node\n to replica node \n completed\n

      \n
    6. \n
    7. \n

      Cache cluster message: Recovering cache nodes\n \n

      \n
    8. \n
    9. \n

      Cache cluster message: Finished recovery for cache nodes\n \n

      \n
    10. \n
    \n

    For more information see:

    \n \n
  • \n
\n

Also see, Testing\n Multi-AZ in the ElastiCache User Guide.

" + "smithy.api#documentation": "

Represents the input of a TestFailover operation which tests automatic\n failover on a specified node group (called shard in the console) in a replication group\n (called cluster in the console).

\n

This API is designed for testing the behavior of your application in case of\n ElastiCache failover. It is not designed to be an operational tool for initiating a\n failover to overcome a problem you may have with the cluster. Moreover, in certain\n conditions such as large-scale operational events, Amazon may block this API.

\n

\n Note the following\n

\n
    \n
  • \n

    A customer can use this operation to test automatic failover on up to 15 shards\n (called node groups in the ElastiCache API and Amazon CLI) in any rolling\n 24-hour period.

    \n
  • \n
  • \n

    If calling this operation on shards in different clusters (called replication\n groups in the API and CLI), the calls can be made concurrently.

    \n

    \n
  • \n
  • \n

    If calling this operation multiple times on different shards in the same Redis OSS (cluster mode enabled) replication group, the first node replacement must\n complete before a subsequent call can be made.

    \n
  • \n
  • \n

    To determine whether the node replacement is complete you can check Events\n using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API.\n Look for the following automatic failover related events, listed here in order\n of occurrance:

    \n
      \n
    1. \n

      Replication group message: Test Failover API called for node\n group \n

      \n
    2. \n
    3. \n

      Cache cluster message: Failover from primary node\n to replica node \n completed\n

      \n
    4. \n
    5. \n

      Replication group message: Failover from primary node\n to replica node \n completed\n

      \n
    6. \n
    7. \n

      Cache cluster message: Recovering cache nodes\n \n

      \n
    8. \n
    9. \n

      Cache cluster message: Finished recovery for cache nodes\n \n

      \n
    10. \n
    \n

    For more information see:

    \n \n
  • \n
\n

Also see, Testing\n Multi-AZ in the ElastiCache User Guide.

" } }, "com.amazonaws.elasticache#TestFailoverMessage": { @@ -16066,7 +16066,7 @@ "Engine": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache engine to which the update applies. Either Redis or Memcached

" + "smithy.api#documentation": "

The Elasticache engine to which the update applies. Either Redis OSS or Memcached.

" } } }, @@ -16221,7 +16221,7 @@ "MinimumEngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The minimum engine version required, which is Redis 6.0

" + "smithy.api#documentation": "

The minimum engine version required, which is Redis OSS 6.0

" } }, "AccessString": { @@ -16285,7 +16285,7 @@ "Engine": { "target": "com.amazonaws.elasticache#EngineType", "traits": { - "smithy.api#documentation": "

The current supported value is Redis.

" + "smithy.api#documentation": "

The current supported value is Redis user.

" } }, "UserIds": { @@ -16297,7 +16297,7 @@ "MinimumEngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The minimum engine version required, which is Redis 6.0

" + "smithy.api#documentation": "

The minimum engine version required, which is Redis OSS 6.0

" } }, "PendingChanges": { @@ -16315,7 +16315,7 @@ "ServerlessCaches": { "target": "com.amazonaws.elasticache#UGServerlessCacheIdList", "traits": { - "smithy.api#documentation": "

Indicates which serverless caches the specified user group is associated with. Available for Redis only.

" + "smithy.api#documentation": "

Indicates which serverless caches the specified user group is associated with. Available for Redis OSS and Serverless Memcached only.

" } }, "ARN": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index de4927d664..4c24f2afa5 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -1640,6 +1640,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -1649,6 +1650,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2126,12 +2128,24 @@ }, "ca-central-1" : { "variants" : [ { + "hostname" : "athena-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "athena.ca-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" : [ { + "hostname" : "athena-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "athena.ca-west-1.api.aws", "tags" : [ "dualstack" ] } ] @@ -2184,6 +2198,20 @@ "tags" : [ "dualstack" ] } ] }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "athena-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "athena-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -5367,12 +5395,6 @@ } ] }, "endpoints" : { - "af-south-1" : { - "hostname" : "datazone.af-south-1.api.aws" - }, - "ap-east-1" : { - "hostname" : "datazone.ap-east-1.api.aws" - }, "ap-northeast-1" : { "hostname" : "datazone.ap-northeast-1.api.aws" }, @@ -5382,9 +5404,6 @@ "ap-northeast-3" : { "hostname" : "datazone.ap-northeast-3.api.aws" }, - "ap-south-1" : { - "hostname" : 
"datazone.ap-south-1.api.aws" - }, "ap-south-2" : { "hostname" : "datazone.ap-south-2.api.aws" }, @@ -5413,18 +5432,12 @@ "eu-central-1" : { "hostname" : "datazone.eu-central-1.api.aws" }, - "eu-central-2" : { - "hostname" : "datazone.eu-central-2.api.aws" - }, "eu-north-1" : { "hostname" : "datazone.eu-north-1.api.aws" }, "eu-south-1" : { "hostname" : "datazone.eu-south-1.api.aws" }, - "eu-south-2" : { - "hostname" : "datazone.eu-south-2.api.aws" - }, "eu-west-1" : { "hostname" : "datazone.eu-west-1.api.aws" }, @@ -5479,6 +5492,8 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -10811,8 +10826,18 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -10821,14 +10846,76 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.ca-west-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : 
{ + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-west-2.amazonaws.com" + }, "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "kinesisvideo" : { @@ -19056,6 +19143,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -20348,6 +20436,7 @@ }, "vpc-lattice" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -20356,6 +20445,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -23710,8 +23800,6 @@ }, "endpoints" : { "us-gov-east-1" : { - "hostname" : "autoscaling-plans.us-gov-east-1.amazonaws.com", - "protocols" : [ "http", "https" ], "variants" : [ { "hostname" : "autoscaling-plans.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] @@ -23723,8 +23811,6 @@ "protocols" : [ "http", "https" ] }, "us-gov-west-1" : { - "hostname" : "autoscaling-plans.us-gov-west-1.amazonaws.com", - "protocols" : [ "http", "https" ], "variants" : [ { "hostname" : 
"autoscaling-plans.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] @@ -25924,8 +26010,32 @@ }, "kinesisanalytics" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "kinesisvideo" : { @@ -26438,6 +26548,12 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-us-gov-global" }, + "oam" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "oidc" : { "endpoints" : { "us-gov-east-1" : { @@ -28329,7 +28445,8 @@ }, "apigateway" : { "endpoints" : { - "us-iso-east-1" : { } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "appconfig" : { @@ -29205,6 +29322,11 @@ "us-isob-east-1" : { } } }, + "apigateway" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "appconfig" : { "endpoints" : { "us-isob-east-1" : { } diff --git a/models/entityresolution.json b/models/entityresolution.json index e72464b596..692e3eb224 100644 --- a/models/entityresolution.json +++ b/models/entityresolution.json @@ -819,7 +819,7 @@ } }, "traits": { - "smithy.api#documentation": "

You do not have sufficient access to perform this action. HTTP Status Code:\n 403\n

", + "smithy.api#documentation": "

You do not have sufficient access to perform this action.

", "smithy.api#error": "client", "smithy.api#httpError": 403 } @@ -884,7 +884,7 @@ "effect": { "target": "com.amazonaws.entityresolution#StatementEffect", "traits": { - "smithy.api#documentation": "

Determines whether the permissions specified in the policy are to be allowed\n (Allow) or denied (Deny).

", + "smithy.api#documentation": "

Determines whether the permissions specified in the policy are to be allowed\n (Allow) or denied (Deny).

\n \n

If you set the value of the effect parameter to Deny for\n the AddPolicyStatement operation, you must also set the value of the\n effect parameter in the policy to Deny for the\n PutPolicy operation.

\n
", "smithy.api#required": {} } }, @@ -1083,7 +1083,7 @@ } }, "traits": { - "smithy.api#documentation": "

The request could not be processed because of conflict in the current state of the\n resource. Example: Workflow already exists, Schema already exists, Workflow is currently\n running, etc. HTTP Status Code: 400\n

", + "smithy.api#documentation": "

The request could not be processed because of conflict in the current state of the\n resource. Example: Workflow already exists, Schema already exists, Workflow is currently\n running, etc.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -1157,15 +1157,16 @@ "idMappingTechniques": { "target": "com.amazonaws.entityresolution#IdMappingTechniques", "traits": { - "smithy.api#documentation": "

An object which defines the idMappingType and the\n providerProperties.

", + "smithy.api#documentation": "

An object which defines the ID mapping technique and any additional\n configurations.

", "smithy.api#required": {} } }, "roleArn": { - "target": "com.amazonaws.entityresolution#RoleArn", + "target": "com.amazonaws.entityresolution#IdMappingRoleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to create resources on your behalf as part of workflow execution.

", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to create resources on your behalf as part of workflow execution.

" } }, "tags": { @@ -1218,15 +1219,16 @@ "idMappingTechniques": { "target": "com.amazonaws.entityresolution#IdMappingTechniques", "traits": { - "smithy.api#documentation": "

An object which defines the idMappingType and the\n providerProperties.

", + "smithy.api#documentation": "

An object which defines the ID mapping technique and any additional\n configurations.

", "smithy.api#required": {} } }, "roleArn": { - "target": "com.amazonaws.entityresolution#RoleArn", + "target": "com.amazonaws.entityresolution#IdMappingRoleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to create resources on your behalf as part of workflow execution.

", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to create resources on your behalf as part of workflow execution.

" } } }, @@ -2176,7 +2178,7 @@ } }, "traits": { - "smithy.api#documentation": "

The request was rejected because it attempted to create resources beyond the current\n Entity Resolution account limits. The error message describes the limit exceeded.\n HTTP Status Code: 402\n

", + "smithy.api#documentation": "

The request was rejected because it attempted to create resources beyond the current\n Entity Resolution account limits. The error message describes the limit exceeded.\n

", "smithy.api#error": "client", "smithy.api#httpError": 402 } @@ -2380,7 +2382,7 @@ "idMappingTechniques": { "target": "com.amazonaws.entityresolution#IdMappingTechniques", "traits": { - "smithy.api#documentation": "

An object which defines the idMappingType and the\n providerProperties.

", + "smithy.api#documentation": "

An object which defines the ID mapping technique and any additional\n configurations.

", "smithy.api#required": {} } }, @@ -2399,10 +2401,11 @@ } }, "roleArn": { - "target": "com.amazonaws.entityresolution#RoleArn", + "target": "com.amazonaws.entityresolution#IdMappingRoleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.

", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.

" } }, "tags": { @@ -3221,13 +3224,13 @@ "inputRecords": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The total number of input records.

" + "smithy.api#documentation": "

The total number of records that were input for processing.

" } }, "totalRecordsProcessed": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The total number of records processed.

" + "smithy.api#documentation": "

The total number of records that were processed.

" } }, "recordsNotProcessed": { @@ -3235,10 +3238,28 @@ "traits": { "smithy.api#documentation": "

The total number of records that did not get processed.

" } + }, + "totalMappedRecords": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The total number of records that were mapped.

" + } + }, + "totalMappedSourceRecords": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The total number of mapped source records.

" + } + }, + "totalMappedTargetRecords": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The total number of distinct mapped target records.

" + } } }, "traits": { - "smithy.api#documentation": "

An object containing InputRecords, TotalRecordsProcessed,\n MatchIDs, and RecordsNotProcessed.

" + "smithy.api#documentation": "

An object containing InputRecords, RecordsNotProcessed,\n TotalRecordsProcessed, TotalMappedRecords,\n TotalMappedSourceRecords, and TotalMappedTargetRecords.

" } }, "com.amazonaws.entityresolution#IdMappingJobOutputSource": { @@ -3281,6 +3302,55 @@ } } }, + "com.amazonaws.entityresolution#IdMappingRoleArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^$|^arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" + } + }, + "com.amazonaws.entityresolution#IdMappingRuleBasedProperties": { + "type": "structure", + "members": { + "rules": { + "target": "com.amazonaws.entityresolution#RuleList", + "traits": { + "smithy.api#documentation": "

The rules that can be used for ID mapping.

", + "smithy.api#length": { + "min": 1, + "max": 25 + } + } + }, + "ruleDefinitionType": { + "target": "com.amazonaws.entityresolution#IdMappingWorkflowRuleDefinitionType", + "traits": { + "smithy.api#documentation": "

The set of rules you can use in an ID mapping workflow. The limitations specified for\n the source or target to define the match rules must be compatible.

", + "smithy.api#required": {} + } + }, + "attributeMatchingModel": { + "target": "com.amazonaws.entityresolution#AttributeMatchingModel", + "traits": { + "smithy.api#documentation": "

The comparison type. You can either choose ONE_TO_ONE or\n MANY_TO_MANY as the attributeMatchingModel.

\n

If you choose MANY_TO_MANY, the system can match attributes across the\n sub-types of an attribute type. For example, if the value of the Email field\n of Profile A matches the value of the BusinessEmail field of Profile B, the\n two profiles are matched on the Email attribute type.

\n

If you choose ONE_TO_ONE, the system can only match attributes if the\n sub-types are an exact match. For example, for the Email attribute type, the\n system will only consider it a match if the value of the Email field of\n Profile A matches the value of the Email field of Profile B.

", + "smithy.api#required": {} + } + }, + "recordMatchingModel": { + "target": "com.amazonaws.entityresolution#RecordMatchingModel", + "traits": { + "smithy.api#documentation": "

The type of matching record that is allowed to be used in an ID mapping workflow.

\n

If the value is set to ONE_SOURCE_TO_ONE_TARGET, only one record in the\n source can be matched to the same record in the target.

\n

If the value is set to MANY_SOURCE_TO_ONE_TARGET, multiple records in the\n source can be matched to one record in the target.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that defines the list of matching rules to run in an ID mapping\n workflow.

" + } + }, "com.amazonaws.entityresolution#IdMappingTechniques": { "type": "structure", "members": { @@ -3291,6 +3361,12 @@ "smithy.api#required": {} } }, + "ruleBasedProperties": { + "target": "com.amazonaws.entityresolution#IdMappingRuleBasedProperties", + "traits": { + "smithy.api#documentation": "

An object which defines any additional configurations required by rule-based\n matching.

" + } + }, "providerProperties": { "target": "com.amazonaws.entityresolution#ProviderProperties", "traits": { @@ -3299,7 +3375,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object which defines the ID mapping techniques and provider configurations.

" + "smithy.api#documentation": "

An object which defines the ID mapping technique and any additional\n configurations.

" } }, "com.amazonaws.entityresolution#IdMappingType": { @@ -3310,6 +3386,12 @@ "traits": { "smithy.api#enumValue": "PROVIDER" } + }, + "RULE_BASED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RULE_BASED" + } } } }, @@ -3325,7 +3407,7 @@ "inputSourceARN": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

An Glue table ARN for the input source table.

", + "smithy.api#documentation": "

An Glue table Amazon Resource Name (ARN) or a matching workflow ARN for\n the input source table.

", "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})$", "smithy.api#required": {} } @@ -3339,7 +3421,7 @@ "type": { "target": "com.amazonaws.entityresolution#IdNamespaceType", "traits": { - "smithy.api#documentation": "

The type of ID namespace. There are two types: SOURCE and\n TARGET.

\n

The SOURCE contains configurations for sourceId data that will\n be processed in an ID mapping workflow.

\n

The TARGET contains a configuration of targetId to which all\n sourceIds will resolve to.

" + "smithy.api#documentation": "

The type of ID namespace. There are two types: SOURCE and\n TARGET.

\n

The SOURCE contains configurations for sourceId data that will\n be processed in an ID mapping workflow.

\n

The TARGET contains a configuration of targetId which all\n sourceIds will resolve to.

" } } }, @@ -3398,6 +3480,29 @@ } } }, + "com.amazonaws.entityresolution#IdMappingWorkflowRuleDefinitionType": { + "type": "enum", + "members": { + "SOURCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SOURCE" + } + }, + "TARGET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TARGET" + } + } + } + }, + "com.amazonaws.entityresolution#IdMappingWorkflowRuleDefinitionTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.entityresolution#IdMappingWorkflowRuleDefinitionType" + } + }, "com.amazonaws.entityresolution#IdMappingWorkflowSummary": { "type": "structure", "members": { @@ -3440,6 +3545,33 @@ "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$" } }, + "com.amazonaws.entityresolution#IdNamespaceIdMappingWorkflowMetadata": { + "type": "structure", + "members": { + "idMappingType": { + "target": "com.amazonaws.entityresolution#IdMappingType", + "traits": { + "smithy.api#documentation": "

The type of ID mapping.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The settings for the ID namespace for the ID mapping workflow job.

" + } + }, + "com.amazonaws.entityresolution#IdNamespaceIdMappingWorkflowMetadataList": { + "type": "list", + "member": { + "target": "com.amazonaws.entityresolution#IdNamespaceIdMappingWorkflowMetadata" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, "com.amazonaws.entityresolution#IdNamespaceIdMappingWorkflowProperties": { "type": "structure", "members": { @@ -3450,6 +3582,12 @@ "smithy.api#required": {} } }, + "ruleBasedProperties": { + "target": "com.amazonaws.entityresolution#NamespaceRuleBasedProperties", + "traits": { + "smithy.api#documentation": "

An object which defines any additional configurations required by rule-based\n matching.

" + } + }, "providerProperties": { "target": "com.amazonaws.entityresolution#NamespaceProviderProperties", "traits": { @@ -3458,7 +3596,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object containing IdMappingType and\n ProviderProperties.

" + "smithy.api#documentation": "

An object containing IdMappingType, ProviderProperties, and\n RuleBasedProperties.

" } }, "com.amazonaws.entityresolution#IdNamespaceIdMappingWorkflowPropertiesList": { @@ -3479,7 +3617,7 @@ "inputSourceARN": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

An Glue table ARN for the input source table.

", + "smithy.api#documentation": "

An Glue table Amazon Resource Name (ARN) or a matching workflow ARN for\n the input source table.

", "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})$", "smithy.api#required": {} } @@ -3536,10 +3674,16 @@ "smithy.api#documentation": "

The description of the ID namespace.

" } }, + "idMappingWorkflowProperties": { + "target": "com.amazonaws.entityresolution#IdNamespaceIdMappingWorkflowMetadataList", + "traits": { + "smithy.api#documentation": "

An object which defines any additional configurations required by the ID mapping\n workflow.

" + } + }, "type": { "target": "com.amazonaws.entityresolution#IdNamespaceType", "traits": { - "smithy.api#documentation": "

The type of ID namespace. There are two types: SOURCE and\n TARGET.

\n

The SOURCE contains configurations for sourceId data that will\n be processed in an ID mapping workflow.

\n

The TARGET contains a configuration of targetId to which all\n sourceIds will resolve to.

", + "smithy.api#documentation": "

The type of ID namespace. There are two types: SOURCE and\n TARGET.

\n

The SOURCE contains configurations for sourceId data that will\n be processed in an ID mapping workflow.

\n

The TARGET contains a configuration of targetId which all\n sourceIds will resolve to.

", "smithy.api#required": {} } }, @@ -3610,7 +3754,7 @@ "inputSourceARN": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

An Glue table ARN for the input source table.

", + "smithy.api#documentation": "

An Glue table Amazon Resource Name (ARN) for the input source\n table.

", "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})$", "smithy.api#required": {} } @@ -3668,7 +3812,7 @@ } }, "traits": { - "smithy.api#documentation": "

This exception occurs when there is an internal failure in the Entity Resolution\n service. HTTP Status Code: 500\n

", + "smithy.api#documentation": "

This exception occurs when there is an internal failure in the Entity Resolution\n service.

", "smithy.api#error": "server", "smithy.api#httpError": 500, "smithy.api#retryable": {} @@ -4500,6 +4644,23 @@ "smithy.api#output": {} } }, + "com.amazonaws.entityresolution#MatchPurpose": { + "type": "enum", + "members": { + "IDENTIFIER_GENERATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IDENTIFIER_GENERATION" + } + }, + "INDEXING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INDEXING" + } + } + } + }, "com.amazonaws.entityresolution#MatchingKeys": { "type": "list", "member": { @@ -4582,6 +4743,42 @@ "smithy.api#documentation": "

An object containing ProviderConfiguration and\n ProviderServiceArn.

" } }, + "com.amazonaws.entityresolution#NamespaceRuleBasedProperties": { + "type": "structure", + "members": { + "rules": { + "target": "com.amazonaws.entityresolution#RuleList", + "traits": { + "smithy.api#documentation": "

The rules for the ID namespace.

", + "smithy.api#length": { + "min": 1, + "max": 25 + } + } + }, + "ruleDefinitionTypes": { + "target": "com.amazonaws.entityresolution#IdMappingWorkflowRuleDefinitionTypeList", + "traits": { + "smithy.api#documentation": "

The sets of rules you can use in an ID mapping workflow. The limitations specified for\n the source and target must be compatible.

" + } + }, + "attributeMatchingModel": { + "target": "com.amazonaws.entityresolution#AttributeMatchingModel", + "traits": { + "smithy.api#documentation": "

The comparison type. You can either choose ONE_TO_ONE or\n MANY_TO_MANY as the attributeMatchingModel.

\n

If you choose MANY_TO_MANY, the system can match attributes across the\n sub-types of an attribute type. For example, if the value of the Email field\n of Profile A matches the value of BusinessEmail field of Profile B, the two\n profiles are matched on the Email attribute type.

\n

If you choose ONE_TO_ONE, the system can only match attributes if the\n sub-types are an exact match. For example, for the Email attribute type, the\n system will only consider it a match if the value of the Email field of\n Profile A matches the value of the Email field of Profile B.

" + } + }, + "recordMatchingModels": { + "target": "com.amazonaws.entityresolution#RecordMatchingModelList", + "traits": { + "smithy.api#documentation": "

The type of matching record that is allowed to be used in an ID mapping workflow.

\n

If the value is set to ONE_SOURCE_TO_ONE_TARGET, only one record in the\n source is matched to one record in the target.

\n

If the value is set to MANY_SOURCE_TO_ONE_TARGET, all matching records in\n the source are matched to one record in the target.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The rule-based properties of an ID namespace. These properties define how the ID\n namespace can be used in an ID mapping workflow.

" + } + }, "com.amazonaws.entityresolution#NextToken": { "type": "string", "traits": { @@ -4733,7 +4930,7 @@ "providerTargetConfigurationDefinition": { "target": "smithy.api#Document", "traits": { - "smithy.api#documentation": "

Configurations required for the target ID namespace.

" + "smithy.api#documentation": "

Configurations required for the target ID namespace.

" } }, "providerSourceConfigurationDefinition": { @@ -4995,7 +5192,7 @@ "policy": { "target": "com.amazonaws.entityresolution#PolicyDocument", "traits": { - "smithy.api#documentation": "

The resource-based policy.

", + "smithy.api#documentation": "

The resource-based policy.

\n \n

If you set the value of the effect parameter in the policy\n to Deny for the PutPolicy operation, you must also set the\n value of the effect parameter to Deny for the\n AddPolicyStatement operation.

\n
", "smithy.api#required": {} } } @@ -5056,6 +5253,29 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.entityresolution#RecordMatchingModel": { + "type": "enum", + "members": { + "ONE_SOURCE_TO_ONE_TARGET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ONE_SOURCE_TO_ONE_TARGET" + } + }, + "MANY_SOURCE_TO_ONE_TARGET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MANY_SOURCE_TO_ONE_TARGET" + } + } + } + }, + "com.amazonaws.entityresolution#RecordMatchingModelList": { + "type": "list", + "member": { + "target": "com.amazonaws.entityresolution#RecordMatchingModel" + } + }, "com.amazonaws.entityresolution#RequiredBucketActionsList": { "type": "list", "member": { @@ -5120,7 +5340,7 @@ } }, "traits": { - "smithy.api#documentation": "

The resource could not be found. HTTP Status Code: 404\n

", + "smithy.api#documentation": "

The resource could not be found.

", "smithy.api#error": "client", "smithy.api#httpError": 404 } @@ -5182,13 +5402,19 @@ "attributeMatchingModel": { "target": "com.amazonaws.entityresolution#AttributeMatchingModel", "traits": { - "smithy.api#documentation": "

The comparison type. You can either choose ONE_TO_ONE or\n MANY_TO_MANY as the AttributeMatchingModel. When choosing\n MANY_TO_MANY, the system can match attributes across the sub-types of an\n attribute type. For example, if the value of the Email field of Profile A and\n the value of BusinessEmail field of Profile B matches, the two profiles are\n matched on the Email type. When choosing ONE_TO_ONE ,the system\n can only match if the sub-types are exact matches. For example, only when the value of the\n Email field of Profile A and the value of the Email field of\n Profile B matches, the two profiles are matched on the Email type.

", + "smithy.api#documentation": "

The comparison type. You can either choose ONE_TO_ONE or\n MANY_TO_MANY as the attributeMatchingModel.

\n

If you choose MANY_TO_MANY, the system can match attributes across the\n sub-types of an attribute type. For example, if the value of the Email field\n of Profile A and the value of BusinessEmail field of Profile B matches, the\n two profiles are matched on the Email attribute type.

\n

If you choose ONE_TO_ONE, the system can only match attributes if the\n sub-types are an exact match. For example, for the Email attribute type, the\n system will only consider it a match if the value of the Email field of\n Profile A matches the value of the Email field of Profile B.

", "smithy.api#required": {} } + }, + "matchPurpose": { + "target": "com.amazonaws.entityresolution#MatchPurpose", + "traits": { + "smithy.api#documentation": "

An indicator of whether to generate IDs and index the data or not.

\n

If you choose IDENTIFIER_GENERATION, the process generates IDs and indexes\n the data.

\n

If you choose INDEXING, the process indexes the data without generating\n IDs.

" + } } }, "traits": { - "smithy.api#documentation": "

An object which defines the list of matching rules to run and has a field\n Rules, which is a list of rule objects.

" + "smithy.api#documentation": "

An object which defines the list of matching rules to run in a matching workflow.\n RuleBasedProperties contain a Rules field, which is a list of rule\n objects.

" } }, "com.amazonaws.entityresolution#RuleList": { @@ -5358,7 +5584,7 @@ "matchKey": { "target": "com.amazonaws.entityresolution#AttributeName", "traits": { - "smithy.api#documentation": "

A key that allows grouping of multiple input attributes into a unified matching group.\n For example, consider a scenario where the source table contains various addresses, such as\n business_address and shipping_address. By assigning a\n matchKey called address to both attributes, Entity Resolution\n will match records across these fields to create a consolidated matching group. If no\n matchKey is specified for a column, it won't be utilized for matching\n purposes but will still be included in the output table.

" + "smithy.api#documentation": "

A key that allows grouping of multiple input attributes into a unified matching group.

\n

For example, consider a scenario where the source table contains various addresses, such\n as business_address and shipping_address. By assigning a\n matchKey called address to both attributes, Entity Resolution\n will match records across these fields to create a consolidated matching group.

\n

If no matchKey is specified for a column, it won't be utilized for matching\n purposes but will still be included in the output table.

" } }, "subType": { @@ -5366,10 +5592,16 @@ "traits": { "smithy.api#documentation": "

The subtype of the attribute, selected from a list of values.

" } + }, + "hashed": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates if the column values are hashed in the schema input. If the value is set to\n TRUE, the column values are hashed. If the value is set to\n FALSE, the column values are cleartext.

" + } } }, "traits": { - "smithy.api#documentation": "

An object containing FieldName, Type, GroupName,\n MatchKey, and SubType.

" + "smithy.api#documentation": "

An object containing FieldName, Type, GroupName,\n MatchKey, Hashing, and SubType.

" } }, "com.amazonaws.entityresolution#SchemaInputAttributes": { @@ -5810,7 +6042,7 @@ } }, "traits": { - "smithy.api#documentation": "

The request was denied due to request throttling. HTTP Status Code:\n 429\n

", + "smithy.api#documentation": "

The request was denied due to request throttling.

", "smithy.api#error": "client", "smithy.api#httpError": 429, "smithy.api#retryable": { @@ -5825,7 +6057,7 @@ "min": 1, "max": 760 }, - "smithy.api#pattern": "^[a-zA-Z_0-9-,]*$" + "smithy.api#pattern": "^[a-zA-Z_0-9-+=/,]*$" } }, "com.amazonaws.entityresolution#UniqueIdList": { @@ -5958,15 +6190,16 @@ "idMappingTechniques": { "target": "com.amazonaws.entityresolution#IdMappingTechniques", "traits": { - "smithy.api#documentation": "

An object which defines the idMappingType and the\n providerProperties.

", + "smithy.api#documentation": "

An object which defines the ID mapping technique and any additional\n configurations.

", "smithy.api#required": {} } }, "roleArn": { - "target": "com.amazonaws.entityresolution#RoleArn", + "target": "com.amazonaws.entityresolution#IdMappingRoleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.

", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.

" } } }, @@ -6013,15 +6246,16 @@ "idMappingTechniques": { "target": "com.amazonaws.entityresolution#IdMappingTechniques", "traits": { - "smithy.api#documentation": "

An object which defines the idMappingType and the\n providerProperties.

", + "smithy.api#documentation": "

An object which defines the ID mapping technique and any additional\n configurations.

", "smithy.api#required": {} } }, "roleArn": { - "target": "com.amazonaws.entityresolution#RoleArn", + "target": "com.amazonaws.entityresolution#IdMappingRoleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.

", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.

" } } }, @@ -6427,7 +6661,7 @@ } }, "traits": { - "smithy.api#documentation": "

The input fails to satisfy the constraints specified by Entity Resolution. HTTP\n Status Code: 400\n

", + "smithy.api#documentation": "

The input fails to satisfy the constraints specified by Entity Resolution.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } diff --git a/models/eventbridge.json b/models/eventbridge.json index 9756587b89..e61df2d72e 100644 --- a/models/eventbridge.json +++ b/models/eventbridge.json @@ -4801,7 +4801,22 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the specified rule.

\n

DescribeRule does not list the targets of a rule. To see the targets associated with a\n rule, use ListTargetsByRule.

" + "smithy.api#documentation": "

Describes the specified rule.

\n

DescribeRule does not list the targets of a rule. To see the targets associated with a\n rule, use ListTargetsByRule.

", + "smithy.test#smokeTests": [ + { + "id": "DescribeRuleFailure", + "params": { + "Name": "fake-rule" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.eventbridge#DescribeRuleRequest": { @@ -6537,7 +6552,20 @@ } ], "traits": { - "smithy.api#documentation": "

Lists your Amazon EventBridge rules. You can either list all the rules or you can\n provide a prefix to match to the rule names.

\n

The maximum number of results per page for requests is 100.

\n

ListRules does not list the targets of a rule. To see the targets associated with a rule,\n use ListTargetsByRule.

" + "smithy.api#documentation": "

Lists your Amazon EventBridge rules. You can either list all the rules or you can\n provide a prefix to match to the rule names.

\n

The maximum number of results per page for requests is 100.

\n

ListRules does not list the targets of a rule. To see the targets associated with a rule,\n use ListTargetsByRule.

", + "smithy.test#smokeTests": [ + { + "id": "ListRulesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.eventbridge#ListRulesRequest": { diff --git a/models/fis.json b/models/fis.json index c5c244f6ee..472715c049 100644 --- a/models/fis.json +++ b/models/fis.json @@ -1183,6 +1183,48 @@ "com.amazonaws.fis#ExperimentEndTime": { "type": "timestamp" }, + "com.amazonaws.fis#ExperimentError": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.fis#ExperimentErrorAccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Account ID where the experiment failure occurred.

" + } + }, + "code": { + "target": "com.amazonaws.fis#ExperimentErrorCode", + "traits": { + "smithy.api#documentation": "

The error code for the failed experiment.

" + } + }, + "location": { + "target": "com.amazonaws.fis#ExperimentErrorLocation", + "traits": { + "smithy.api#documentation": "

Context for the section of the experiment template that failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the error when an experiment has failed.

" + } + }, + "com.amazonaws.fis#ExperimentErrorAccountId": { + "type": "string" + }, + "com.amazonaws.fis#ExperimentErrorCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + }, + "smithy.api#pattern": "^[\\S]+$" + } + }, + "com.amazonaws.fis#ExperimentErrorLocation": { + "type": "string" + }, "com.amazonaws.fis#ExperimentId": { "type": "string", "traits": { @@ -1282,6 +1324,12 @@ "traits": { "smithy.api#documentation": "

The reason for the state.

" } + }, + "error": { + "target": "com.amazonaws.fis#ExperimentError", + "traits": { + "smithy.api#documentation": "

The error information of the experiment when the action has failed.

" + } } }, "traits": { @@ -2338,7 +2386,7 @@ "name": "fis" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

Fault Injection Service is a managed service that enables you to perform fault injection \n experiments on your Amazon Web Services workloads. For more information, see the Fault Injection Service User Guide.

", + "smithy.api#documentation": "

Amazon Web Services Fault Injection Service is a managed service that enables you to perform fault injection \n experiments on your Amazon Web Services workloads. For more information, see the Fault Injection Service User Guide.

", "smithy.api#title": "AWS Fault Injection Simulator", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/glue.json b/models/glue.json index 1f7dff017e..c9055373e8 100644 --- a/models/glue.json +++ b/models/glue.json @@ -78,6 +78,9 @@ { "target": "com.amazonaws.glue#BatchGetWorkflows" }, + { + "target": "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotation" + }, { "target": "com.amazonaws.glue#BatchStopJobRun" }, @@ -309,6 +312,12 @@ { "target": "com.amazonaws.glue#GetDataflowGraph" }, + { + "target": "com.amazonaws.glue#GetDataQualityModel" + }, + { + "target": "com.amazonaws.glue#GetDataQualityModelResult" + }, { "target": "com.amazonaws.glue#GetDataQualityResult" }, @@ -486,6 +495,12 @@ { "target": "com.amazonaws.glue#ListDataQualityRulesets" }, + { + "target": "com.amazonaws.glue#ListDataQualityStatisticAnnotations" + }, + { + "target": "com.amazonaws.glue#ListDataQualityStatistics" + }, { "target": "com.amazonaws.glue#ListDevEndpoints" }, @@ -525,6 +540,9 @@ { "target": "com.amazonaws.glue#PutDataCatalogEncryptionSettings" }, + { + "target": "com.amazonaws.glue#PutDataQualityProfileAnnotation" + }, { "target": "com.amazonaws.glue#PutResourcePolicy" }, @@ -2221,6 +2239,44 @@ "smithy.api#documentation": "

Specifies an Amazon Redshift target.

" } }, + "com.amazonaws.glue#AnnotationError": { + "type": "structure", + "members": { + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Profile ID for the failed annotation.

" + } + }, + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Statistic ID for the failed annotation.

" + } + }, + "FailureReason": { + "target": "com.amazonaws.glue#DescriptionString", + "traits": { + "smithy.api#documentation": "

The reason why the annotation failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A failed annotation.

" + } + }, + "com.amazonaws.glue#AnnotationErrorList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#AnnotationError" + } + }, + "com.amazonaws.glue#AnnotationList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#StatisticAnnotation" + } + }, "com.amazonaws.glue#ApplyMapping": { "type": "structure", "members": { @@ -2560,6 +2616,12 @@ "smithy.api#required": {} } }, + "PartitionKeys": { + "target": "com.amazonaws.glue#GlueStudioPathList", + "traits": { + "smithy.api#documentation": "

The partition keys used to distribute data across multiple partitions or shards based on a specific key or set of key.

" + } + }, "Database": { "target": "com.amazonaws.glue#EnclosedInStringProperty", "traits": { @@ -3724,6 +3786,67 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotation": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotationRequest" + }, + "output": { + "target": "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#ResourceNumberLimitExceededException" + } + ], + "traits": { + "smithy.api#documentation": "

Annotate datapoints over time for a specific data quality statistic.

" + } + }, + "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotationRequest": { + "type": "structure", + "members": { + "InclusionAnnotations": { + "target": "com.amazonaws.glue#InclusionAnnotationList", + "traits": { + "smithy.api#documentation": "

A list of DatapointInclusionAnnotation's.

", + "smithy.api#required": {} + } + }, + "ClientToken": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

Client Token.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotationResponse": { + "type": "structure", + "members": { + "FailedInclusionAnnotations": { + "target": "com.amazonaws.glue#AnnotationErrorList", + "traits": { + "smithy.api#documentation": "

A list of AnnotationError's.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.glue#BatchSize": { "type": "integer", "traits": { @@ -6705,7 +6828,7 @@ "ConnectionProperties": { "target": "com.amazonaws.glue#ConnectionProperties", "traits": { - "smithy.api#documentation": "

These key-value pairs define parameters for the connection:

\n
    \n
  • \n

    \n HOST - The host URI: either the\n fully qualified domain name (FQDN) or the IPv4 address of\n the database host.

    \n
  • \n
  • \n

    \n PORT - The port number, between\n 1024 and 65535, of the port on which the database host is\n listening for database connections.

    \n
  • \n
  • \n

    \n USER_NAME - The name under which\n to log in to the database. The value string for USER_NAME is \"USERNAME\".

    \n
  • \n
  • \n

    \n PASSWORD - A password,\n if one is used, for the user name.

    \n
  • \n
  • \n

    \n ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

    \n
  • \n
  • \n

    \n JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the\n JAR file that contains the JDBC driver to use.

    \n
  • \n
  • \n

    \n JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

    \n
  • \n
  • \n

    \n JDBC_ENGINE - The name of the JDBC engine to use.

    \n
  • \n
  • \n

    \n JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

    \n
  • \n
  • \n

    \n CONFIG_FILES - (Reserved for future use.)

    \n
  • \n
  • \n

    \n INSTANCE_ID - The instance ID to use.

    \n
  • \n
  • \n

    \n JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.

    \n
  • \n
  • \n

    \n JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure\n Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the\n client. The default is false.

    \n
  • \n
  • \n

    \n CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.

    \n
  • \n
  • \n

    \n SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate.

    \n
  • \n
  • \n

    \n CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.

    \n
  • \n
  • \n

    \n CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.

    \n
  • \n
  • \n

    \n SECRET_ID - The secret ID used for the secret manager of credentials.

    \n
  • \n
  • \n

    \n CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection.

    \n
  • \n
  • \n

    \n CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection.

    \n
  • \n
  • \n

    \n CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection.

    \n
  • \n
  • \n

    \n KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.

    \n
  • \n
  • \n

    \n KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".

    \n
  • \n
  • \n

    \n KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.

    \n
  • \n
  • \n

    \n KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".

    \n
  • \n
  • \n

    \n KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).

    \n
  • \n
  • \n

    \n KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional).

    \n
  • \n
  • \n

    \n KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n KAFKA_SASL_MECHANISM - \"SCRAM-SHA-512\", \"GSSAPI\", \"AWS_MSK_IAM\", or \"PLAIN\". These are the supported SASL Mechanisms.

    \n
  • \n
  • \n

    \n KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with the \"PLAIN\" mechanism.

    \n
  • \n
  • \n

    \n KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with the \"PLAIN\" mechanism.

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.

    \n
  • \n
  • \n

    \n KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos principal used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.

    \n
  • \n
" + "smithy.api#documentation": "

These key-value pairs define parameters for the connection:

\n
    \n
  • \n

    \n HOST - The host URI: either the\n fully qualified domain name (FQDN) or the IPv4 address of\n the database host.

    \n
  • \n
  • \n

    \n PORT - The port number, between\n 1024 and 65535, of the port on which the database host is\n listening for database connections.

    \n
  • \n
  • \n

    \n USER_NAME - The name under which\n to log in to the database. The value string for USER_NAME is \"USERNAME\".

    \n
  • \n
  • \n

    \n PASSWORD - A password,\n if one is used, for the user name.

    \n
  • \n
  • \n

    \n ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

    \n
  • \n
  • \n

    \n JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the\n JAR file that contains the JDBC driver to use.

    \n
  • \n
  • \n

    \n JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

    \n
  • \n
  • \n

    \n JDBC_ENGINE - The name of the JDBC engine to use.

    \n
  • \n
  • \n

    \n JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

    \n
  • \n
  • \n

    \n CONFIG_FILES - (Reserved for future use.)

    \n
  • \n
  • \n

    \n INSTANCE_ID - The instance ID to use.

    \n
  • \n
  • \n

    \n JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.

    \n
  • \n
  • \n

    \n JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure\n Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the\n client. The default is false.

    \n
  • \n
  • \n

    \n CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.

    \n
  • \n
  • \n

    \n SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate.

    \n
  • \n
  • \n

    \n CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.

    \n
  • \n
  • \n

    \n CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.

    \n
  • \n
  • \n

    \n SECRET_ID - The secret ID used for the secret manager of credentials.

    \n
  • \n
  • \n

    \n CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection.

    \n
  • \n
  • \n

    \n CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection.

    \n
  • \n
  • \n

    \n CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection.

    \n
  • \n
  • \n

    \n KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.

    \n
  • \n
  • \n

    \n KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".

    \n
  • \n
  • \n

    \n KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.

    \n
  • \n
  • \n

    \n KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".

    \n
  • \n
  • \n

    \n KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).

    \n
  • \n
  • \n

    \n KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional).

    \n
  • \n
  • \n

    \n KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n KAFKA_SASL_MECHANISM - \"SCRAM-SHA-512\", \"GSSAPI\", \"AWS_MSK_IAM\", or \"PLAIN\". These are the supported SASL Mechanisms.

    \n
  • \n
  • \n

    \n KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with the \"PLAIN\" mechanism.

    \n
  • \n
  • \n

    \n KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with the \"PLAIN\" mechanism.

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.

    \n
  • \n
  • \n

    \n KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos principal used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.

    \n
  • \n
  • \n

    \n ROLE_ARN - The role to be used for running queries.

    \n
  • \n
  • \n

    \n REGION - The Amazon Web Services Region where queries will be run.

    \n
  • \n
  • \n

    \n WORKGROUP_NAME - The name of an Amazon Redshift serverless workgroup or Amazon Athena workgroup in which queries will run.

    \n
  • \n
  • \n

    \n CLUSTER_IDENTIFIER - The cluster identifier of an Amazon Redshift cluster in which queries will run.

    \n
  • \n
  • \n

    \n DATABASE - The Amazon Redshift database that you are connecting to.

    \n
  • \n
" } }, "PhysicalConnectionRequirements": { @@ -6780,7 +6903,7 @@ "ConnectionType": { "target": "com.amazonaws.glue#ConnectionType", "traits": { - "smithy.api#documentation": "

The type of the connection. Currently, these types are supported:

\n
    \n
  • \n

    \n JDBC - Designates a connection to a database through Java Database Connectivity (JDBC).

    \n

    \n JDBC Connections use the following ConnectionParameters.

    \n
      \n
    • \n

      Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL.

      \n
    • \n
    • \n

      Required: All of (USERNAME, PASSWORD) or SECRET_ID.

      \n
    • \n
    • \n

      Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC.

      \n
    • \n
    \n
  • \n
  • \n

    \n KAFKA - Designates a connection to an Apache Kafka streaming platform.

    \n

    \n KAFKA Connections use the following ConnectionParameters.

    \n
      \n
    • \n

      Required: KAFKA_BOOTSTRAP_SERVERS.

      \n
    • \n
    • \n

      Optional: KAFKA_SSL_ENABLED, KAFKA_CUSTOM_CERT, KAFKA_SKIP_CUSTOM_CERT_VALIDATION. These parameters are used to configure SSL with KAFKA.

      \n
    • \n
    • \n

      Optional: KAFKA_CLIENT_KEYSTORE, KAFKA_CLIENT_KEYSTORE_PASSWORD, KAFKA_CLIENT_KEY_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD. These parameters are used to configure TLS client configuration with SSL in KAFKA.

      \n
    • \n
    • \n

      Optional: KAFKA_SASL_MECHANISM. Can be specified as SCRAM-SHA-512, GSSAPI, or AWS_MSK_IAM.

      \n
    • \n
    • \n

      Optional: KAFKA_SASL_SCRAM_USERNAME, KAFKA_SASL_SCRAM_PASSWORD, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA.

      \n
    • \n
    • \n

      Optional: KAFKA_SASL_GSSAPI_KEYTAB, KAFKA_SASL_GSSAPI_KRB5_CONF, KAFKA_SASL_GSSAPI_SERVICE, KAFKA_SASL_GSSAPI_PRINCIPAL. These parameters are used to configure SASL/GSSAPI authentication with KAFKA.

      \n
    • \n
    \n
  • \n
  • \n

    \n MONGODB - Designates a connection to a MongoDB document database.

    \n

    \n MONGODB Connections use the following ConnectionParameters.

    \n
      \n
    • \n

      Required: CONNECTION_URL.

      \n
    • \n
    • \n

      Required: All of (USERNAME, PASSWORD) or SECRET_ID.

      \n
    • \n
    \n
  • \n
  • \n

    \n SALESFORCE - Designates a connection to Salesforce using OAuth authentication.

    \n
      \n
    • \n

      Requires the AuthenticationConfiguration member to be configured.

      \n
    • \n
    \n
  • \n
  • \n

    \n NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).

    \n

    \n NETWORK Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.

    \n
  • \n
  • \n

    \n MARKETPLACE - Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue.

    \n

    \n MARKETPLACE Connections use the following ConnectionParameters.

    \n
      \n
    • \n

      Required: CONNECTOR_TYPE, CONNECTOR_URL, CONNECTOR_CLASS_NAME, CONNECTION_URL.

      \n
    • \n
    • \n

      Required for JDBC\n CONNECTOR_TYPE connections: All of (USERNAME, PASSWORD) or SECRET_ID.

      \n
    • \n
    \n
  • \n
  • \n

    \n CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.

    \n
  • \n
\n

\n SFTP is not supported.

\n

For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties.

\n

For more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections.

", + "smithy.api#documentation": "

The type of the connection. Currently, these types are supported:

\n
    \n
  • \n

    \n JDBC - Designates a connection to a database through Java Database Connectivity (JDBC).

    \n

    \n JDBC Connections use the following ConnectionParameters.

    \n
      \n
    • \n

      Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL.

      \n
    • \n
    • \n

      Required: All of (USERNAME, PASSWORD) or SECRET_ID.

      \n
    • \n
    • \n

      Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC.

      \n
    • \n
    \n
  • \n
  • \n

    \n KAFKA - Designates a connection to an Apache Kafka streaming platform.

    \n

    \n KAFKA Connections use the following ConnectionParameters.

    \n
      \n
    • \n

      Required: KAFKA_BOOTSTRAP_SERVERS.

      \n
    • \n
    • \n

      Optional: KAFKA_SSL_ENABLED, KAFKA_CUSTOM_CERT, KAFKA_SKIP_CUSTOM_CERT_VALIDATION. These parameters are used to configure SSL with KAFKA.

      \n
    • \n
    • \n

      Optional: KAFKA_CLIENT_KEYSTORE, KAFKA_CLIENT_KEYSTORE_PASSWORD, KAFKA_CLIENT_KEY_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD. These parameters are used to configure TLS client configuration with SSL in KAFKA.

      \n
    • \n
    • \n

      Optional: KAFKA_SASL_MECHANISM. Can be specified as SCRAM-SHA-512, GSSAPI, or AWS_MSK_IAM.

      \n
    • \n
    • \n

      Optional: KAFKA_SASL_SCRAM_USERNAME, KAFKA_SASL_SCRAM_PASSWORD, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA.

      \n
    • \n
    • \n

      Optional: KAFKA_SASL_GSSAPI_KEYTAB, KAFKA_SASL_GSSAPI_KRB5_CONF, KAFKA_SASL_GSSAPI_SERVICE, KAFKA_SASL_GSSAPI_PRINCIPAL. These parameters are used to configure SASL/GSSAPI authentication with KAFKA.

      \n
    • \n
    \n
  • \n
  • \n

    \n MONGODB - Designates a connection to a MongoDB document database.

    \n

    \n MONGODB Connections use the following ConnectionParameters.

    \n
      \n
    • \n

      Required: CONNECTION_URL.

      \n
    • \n
    • \n

      Required: All of (USERNAME, PASSWORD) or SECRET_ID.

      \n
    • \n
    \n
  • \n
  • \n

    \n SALESFORCE - Designates a connection to Salesforce using OAuth authentication.

    \n
      \n
    • \n

      Requires the AuthenticationConfiguration member to be configured.

      \n
    • \n
    \n
  • \n
  • \n

    \n VIEW_VALIDATION_REDSHIFT - Designates a connection used for view validation by Amazon Redshift.

    \n
  • \n
  • \n

    \n VIEW_VALIDATION_ATHENA - Designates a connection used for view validation by Amazon Athena.

    \n
  • \n
  • \n

    \n NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).

    \n

    \n NETWORK Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.

    \n
  • \n
  • \n

    \n MARKETPLACE - Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue.

    \n

    \n MARKETPLACE Connections use the following ConnectionParameters.

    \n
      \n
    • \n

      Required: CONNECTOR_TYPE, CONNECTOR_URL, CONNECTOR_CLASS_NAME, CONNECTION_URL.

      \n
    • \n
    • \n

      Required for JDBC\n CONNECTOR_TYPE connections: All of (USERNAME, PASSWORD) or SECRET_ID.

      \n
    • \n
    \n
  • \n
  • \n

    \n CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.

    \n
  • \n
\n

\n SFTP is not supported.

\n

For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties.

\n

For more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections.

", "smithy.api#required": {} } }, @@ -7127,6 +7250,30 @@ "traits": { "smithy.api#enumValue": "ROLE_ARN" } + }, + "REGION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REGION" + } + }, + "WORKGROUP_NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WORKGROUP_NAME" + } + }, + "CLUSTER_IDENTIFIER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLUSTER_IDENTIFIER" + } + }, + "DATABASE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATABASE" + } } } }, @@ -7203,6 +7350,18 @@ "traits": { "smithy.api#enumValue": "SALESFORCE" } + }, + "VIEW_VALIDATION_REDSHIFT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VIEW_VALIDATION_REDSHIFT" + } + }, + "VIEW_VALIDATION_ATHENA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VIEW_VALIDATION_ATHENA" + } } } }, @@ -8503,6 +8662,12 @@ "smithy.api#documentation": "

A target table associated with the data quality ruleset.

" } }, + "DataQualitySecurityConfiguration": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the security configuration created with the data quality encryption option.

" + } + }, "ClientToken": { "target": "com.amazonaws.glue#HashString", "traits": { @@ -11121,6 +11286,29 @@ "smithy.api#documentation": "

Describes the data quality metric value according to the analysis of historical data.

" } }, + "com.amazonaws.glue#DataQualityModelStatus": { + "type": "enum", + "members": { + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RUNNING" + } + }, + "SUCCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, "com.amazonaws.glue#DataQualityObservation": { "type": "structure", "members": { @@ -11148,7 +11336,8 @@ "min": 0, "max": 2048 }, - "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$", + "smithy.api#sensitive": {} } }, "com.amazonaws.glue#DataQualityObservations": { @@ -11172,6 +11361,12 @@ "smithy.api#documentation": "

A unique result ID for the data quality result.

" } }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Profile ID for the data quality result.

" + } + }, "Score": { "target": "com.amazonaws.glue#GenericBoundedDouble", "traits": { @@ -11458,6 +11653,12 @@ "traits": { "smithy.api#documentation": "

A map of metrics associated with the evaluation of the rule.

" } + }, + "EvaluatedRule": { + "target": "com.amazonaws.glue#DataQualityRuleResultDescription", + "traits": { + "smithy.api#documentation": "

The evaluated rule.

" + } } }, "traits": { @@ -11471,7 +11672,8 @@ "min": 0, "max": 2048 }, - "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$", + "smithy.api#sensitive": {} } }, "com.amazonaws.glue#DataQualityRuleResultStatus": { @@ -11926,6 +12128,32 @@ } } }, + "com.amazonaws.glue#DatapointInclusionAnnotation": { + "type": "structure", + "members": { + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The ID of the data quality profile the statistic belongs to.

" + } + }, + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Statistic ID.

" + } + }, + "InclusionAnnotation": { + "target": "com.amazonaws.glue#InclusionAnnotationValue", + "traits": { + "smithy.api#documentation": "

The inclusion annotation value to apply to the statistic.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An Inclusion Annotation.

" + } + }, "com.amazonaws.glue#Datatype": { "type": "structure", "members": { @@ -14686,6 +14914,9 @@ }, "value": { "target": "com.amazonaws.glue#NullableDouble" + }, + "traits": { + "smithy.api#sensitive": {} } }, "com.amazonaws.glue#EvaluationMetrics": { @@ -15670,7 +15901,20 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the status of a migration operation.

" + "smithy.api#documentation": "

Retrieves the status of a migration operation.

", + "smithy.test#smokeTests": [ + { + "id": "GetCatalogImportStatusSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.glue#GetCatalogImportStatusRequest": { @@ -16605,6 +16849,153 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#GetDataQualityModel": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#GetDataQualityModelRequest" + }, + "output": { + "target": "com.amazonaws.glue#GetDataQualityModelResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieve the training status of the model along with more information (CompletedOn, StartedOn, FailureReason).

" + } + }, + "com.amazonaws.glue#GetDataQualityModelRequest": { + "type": "structure", + "members": { + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Statistic ID.

" + } + }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Profile ID.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#GetDataQualityModelResponse": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.glue#DataQualityModelStatus", + "traits": { + "smithy.api#documentation": "

The training status of the data quality model.

" + } + }, + "StartedOn": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp when the data quality model training started.

" + } + }, + "CompletedOn": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp when the data quality model training completed.

" + } + }, + "FailureReason": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The training failure reason.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.glue#GetDataQualityModelResult": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#GetDataQualityModelResultRequest" + }, + "output": { + "target": "com.amazonaws.glue#GetDataQualityModelResultResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieve a statistic's predictions for a given Profile ID.

" + } + }, + "com.amazonaws.glue#GetDataQualityModelResultRequest": { + "type": "structure", + "members": { + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Statistic ID.

", + "smithy.api#required": {} + } + }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Profile ID.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#GetDataQualityModelResultResponse": { + "type": "structure", + "members": { + "CompletedOn": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp when the data quality model training completed.

" + } + }, + "Model": { + "target": "com.amazonaws.glue#StatisticModelResults", + "traits": { + "smithy.api#documentation": "

A list of StatisticModelResult\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.glue#GetDataQualityResult": { "type": "operation", "input": { @@ -16655,6 +17046,12 @@ "smithy.api#documentation": "

A unique result ID for the data quality result.

" } }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Profile ID for the data quality result.

" + } + }, "Score": { "target": "com.amazonaws.glue#GenericBoundedDouble", "traits": { @@ -16854,6 +17251,12 @@ "traits": { "smithy.api#documentation": "

The name of the ruleset that was created by the run.

" } + }, + "DataQualitySecurityConfiguration": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the security configuration created with the data quality encryption option.

" + } } }, "traits": { @@ -17085,6 +17488,12 @@ "traits": { "smithy.api#documentation": "

When a ruleset was created from a recommendation run, this run ID is generated to link the two together.

" } + }, + "DataQualitySecurityConfiguration": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the security configuration created with the data quality encryption option.

" + } } }, "traits": { @@ -19748,6 +20157,12 @@ "traits": { "smithy.api#documentation": "

The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId.

" } + }, + "IncludeStatusDetails": { + "target": "com.amazonaws.glue#BooleanNullable", + "traits": { + "smithy.api#documentation": "

Specifies whether to include status details related to a request to create or update an Glue Data Catalog view.

" + } } }, "traits": { @@ -20030,6 +20445,18 @@ "traits": { "smithy.api#documentation": "

The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId.

" } + }, + "IncludeStatusDetails": { + "target": "com.amazonaws.glue#BooleanNullable", + "traits": { + "smithy.api#documentation": "

Specifies whether to include status details related to a request to create or update an Glue Data Catalog view.

" + } + }, + "AttributesToGet": { + "target": "com.amazonaws.glue#TableAttributesList", + "traits": { + "smithy.api#documentation": "

Specifies the table fields returned by the GetTables call. This parameter doesn’t accept an empty list. The request must include NAME.

\n

The following are the valid combinations of values:

\n
    \n
  • \n

    \n NAME - Names of all tables in the database.

    \n
  • \n
  • \n

    \n NAME, TABLE_TYPE - Names of all tables and the table types.

    \n
  • \n
" + } } }, "traits": { @@ -20048,7 +20475,7 @@ "NextToken": { "target": "com.amazonaws.glue#Token", "traits": { - "smithy.api#documentation": "

A continuation token, present if the current list segment is\n not the last.

" + "smithy.api#documentation": "

A continuation token, present if the current list segment is not the last.

" } } }, @@ -21912,6 +22339,29 @@ "smithy.api#documentation": "

Specifies configuration properties for an importing labels task run.

" } }, + "com.amazonaws.glue#InclusionAnnotationList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#DatapointInclusionAnnotation" + } + }, + "com.amazonaws.glue#InclusionAnnotationValue": { + "type": "enum", + "members": { + "INCLUDE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INCLUDE" + } + }, + "EXCLUDE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXCLUDE" + } + } + } + }, "com.amazonaws.glue#Integer": { "type": "integer", "traits": { @@ -24400,13 +24850,87 @@ "smithy.api#input": {} } }, - "com.amazonaws.glue#ListDataQualityRuleRecommendationRunsResponse": { + "com.amazonaws.glue#ListDataQualityRuleRecommendationRunsResponse": { + "type": "structure", + "members": { + "Runs": { + "target": "com.amazonaws.glue#DataQualityRuleRecommendationRunList", + "traits": { + "smithy.api#documentation": "

A list of DataQualityRuleRecommendationRunDescription objects.

" + } + }, + "NextToken": { + "target": "com.amazonaws.glue#PaginationToken", + "traits": { + "smithy.api#documentation": "

A pagination token, if more results are available.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.glue#ListDataQualityRulesetEvaluationRuns": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsRequest" + }, + "output": { + "target": "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all the runs meeting the filter criteria, where a ruleset is evaluated against a data source.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsRequest": { + "type": "structure", + "members": { + "Filter": { + "target": "com.amazonaws.glue#DataQualityRulesetEvaluationRunFilter", + "traits": { + "smithy.api#documentation": "

The filter criteria.

" + } + }, + "NextToken": { + "target": "com.amazonaws.glue#PaginationToken", + "traits": { + "smithy.api#documentation": "

A paginated token to offset the results.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.glue#PageSize", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsResponse": { "type": "structure", "members": { "Runs": { - "target": "com.amazonaws.glue#DataQualityRuleRecommendationRunList", + "target": "com.amazonaws.glue#DataQualityRulesetEvaluationRunList", "traits": { - "smithy.api#documentation": "

A list of DataQualityRuleRecommendationRunDescription objects.

" + "smithy.api#documentation": "

A list of DataQualityRulesetEvaluationRunDescription objects representing data quality ruleset runs.

" } }, "NextToken": { @@ -24420,15 +24944,18 @@ "smithy.api#output": {} } }, - "com.amazonaws.glue#ListDataQualityRulesetEvaluationRuns": { + "com.amazonaws.glue#ListDataQualityRulesets": { "type": "operation", "input": { - "target": "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsRequest" + "target": "com.amazonaws.glue#ListDataQualityRulesetsRequest" }, "output": { - "target": "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsResponse" + "target": "com.amazonaws.glue#ListDataQualityRulesetsResponse" }, "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, { "target": "com.amazonaws.glue#InternalServiceException" }, @@ -24440,7 +24967,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists all the runs meeting the filter criteria, where a ruleset is evaluated against a data source.

", + "smithy.api#documentation": "

Returns a paginated list of rulesets for the specified list of Glue tables.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -24448,25 +24975,109 @@ } } }, - "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsRequest": { + "com.amazonaws.glue#ListDataQualityRulesetsRequest": { "type": "structure", "members": { + "NextToken": { + "target": "com.amazonaws.glue#PaginationToken", + "traits": { + "smithy.api#documentation": "

A paginated token to offset the results.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.glue#PageSize", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return.

" + } + }, "Filter": { - "target": "com.amazonaws.glue#DataQualityRulesetEvaluationRunFilter", + "target": "com.amazonaws.glue#DataQualityRulesetFilterCriteria", "traits": { - "smithy.api#documentation": "

The filter criteria.

" + "smithy.api#documentation": "

The filter criteria.

" + } + }, + "Tags": { + "target": "com.amazonaws.glue#TagsMap", + "traits": { + "smithy.api#documentation": "

A list of key-value pair tags.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#ListDataQualityRulesetsResponse": { + "type": "structure", + "members": { + "Rulesets": { + "target": "com.amazonaws.glue#DataQualityRulesetList", + "traits": { + "smithy.api#documentation": "

A paginated list of rulesets for the specified list of Glue tables.

" } }, "NextToken": { "target": "com.amazonaws.glue#PaginationToken", "traits": { - "smithy.api#documentation": "

A paginated token to offset the results.

" + "smithy.api#documentation": "

A pagination token, if more results are available.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.glue#ListDataQualityStatisticAnnotations": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#ListDataQualityStatisticAnnotationsRequest" + }, + "output": { + "target": "com.amazonaws.glue#ListDataQualityStatisticAnnotationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieve annotations for a data quality statistic.

" + } + }, + "com.amazonaws.glue#ListDataQualityStatisticAnnotationsRequest": { + "type": "structure", + "members": { + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Statistic ID.

" + } + }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Profile ID.

" + } + }, + "TimestampFilter": { + "target": "com.amazonaws.glue#TimestampFilter", + "traits": { + "smithy.api#documentation": "

A timestamp filter.

" } }, "MaxResults": { "target": "com.amazonaws.glue#PageSize", "traits": { - "smithy.api#documentation": "

The maximum number of results to return.

" + "smithy.api#documentation": "

The maximum number of results to return in this request.

" + } + }, + "NextToken": { + "target": "com.amazonaws.glue#PaginationToken", + "traits": { + "smithy.api#documentation": "

A pagination token to retrieve the next set of results.

" } } }, @@ -24474,19 +25085,19 @@ "smithy.api#input": {} } }, - "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsResponse": { + "com.amazonaws.glue#ListDataQualityStatisticAnnotationsResponse": { "type": "structure", "members": { - "Runs": { - "target": "com.amazonaws.glue#DataQualityRulesetEvaluationRunList", + "Annotations": { + "target": "com.amazonaws.glue#AnnotationList", "traits": { - "smithy.api#documentation": "

A list of DataQualityRulesetEvaluationRunDescription objects representing data quality ruleset runs.

" + "smithy.api#documentation": "

A list of StatisticAnnotation objects applied to the Statistic.

" } }, "NextToken": { "target": "com.amazonaws.glue#PaginationToken", "traits": { - "smithy.api#documentation": "

A pagination token, if more results are available.

" + "smithy.api#documentation": "

A pagination token to retrieve the next set of results.

" } } }, @@ -24494,13 +25105,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.glue#ListDataQualityRulesets": { + "com.amazonaws.glue#ListDataQualityStatistics": { "type": "operation", "input": { - "target": "com.amazonaws.glue#ListDataQualityRulesetsRequest" + "target": "com.amazonaws.glue#ListDataQualityStatisticsRequest" }, "output": { - "target": "com.amazonaws.glue#ListDataQualityRulesetsResponse" + "target": "com.amazonaws.glue#ListDataQualityStatisticsResponse" }, "errors": [ { @@ -24511,45 +25122,43 @@ }, { "target": "com.amazonaws.glue#InvalidInputException" - }, - { - "target": "com.amazonaws.glue#OperationTimeoutException" } ], "traits": { - "smithy.api#documentation": "

Returns a paginated list of rulesets for the specified list of Glue tables.

", - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "pageSize": "MaxResults" - } + "smithy.api#documentation": "

Retrieves a list of data quality statistics.

" } }, - "com.amazonaws.glue#ListDataQualityRulesetsRequest": { + "com.amazonaws.glue#ListDataQualityStatisticsRequest": { "type": "structure", "members": { - "NextToken": { - "target": "com.amazonaws.glue#PaginationToken", + "StatisticId": { + "target": "com.amazonaws.glue#HashString", "traits": { - "smithy.api#documentation": "

A paginated token to offset the results.

" + "smithy.api#documentation": "

The Statistic ID.

" } }, - "MaxResults": { - "target": "com.amazonaws.glue#PageSize", + "ProfileId": { + "target": "com.amazonaws.glue#HashString", "traits": { - "smithy.api#documentation": "

The maximum number of results to return.

" + "smithy.api#documentation": "

The Profile ID.

" } }, - "Filter": { - "target": "com.amazonaws.glue#DataQualityRulesetFilterCriteria", + "TimestampFilter": { + "target": "com.amazonaws.glue#TimestampFilter", "traits": { - "smithy.api#documentation": "

The filter criteria.

" + "smithy.api#documentation": "

A timestamp filter.

" } }, - "Tags": { - "target": "com.amazonaws.glue#TagsMap", + "MaxResults": { + "target": "com.amazonaws.glue#PageSize", "traits": { - "smithy.api#documentation": "

A list of key-value pair tags.

" + "smithy.api#documentation": "

The maximum number of results to return in this request.

" + } + }, + "NextToken": { + "target": "com.amazonaws.glue#PaginationToken", + "traits": { + "smithy.api#documentation": "

A pagination token to request the next page of results.

" } } }, @@ -24557,19 +25166,19 @@ "smithy.api#input": {} } }, - "com.amazonaws.glue#ListDataQualityRulesetsResponse": { + "com.amazonaws.glue#ListDataQualityStatisticsResponse": { "type": "structure", "members": { - "Rulesets": { - "target": "com.amazonaws.glue#DataQualityRulesetList", + "Statistics": { + "target": "com.amazonaws.glue#StatisticSummaryList", "traits": { - "smithy.api#documentation": "

A paginated list of rulesets for the specified list of Glue tables.

" + "smithy.api#documentation": "

A StatisticSummaryList.

" } }, "NextToken": { "target": "com.amazonaws.glue#PaginationToken", "traits": { - "smithy.api#documentation": "

A pagination token, if more results are available.

" + "smithy.api#documentation": "

A pagination token to request the next page of results.

" } } }, @@ -26227,6 +26836,12 @@ "smithy.api#documentation": "

The name of the data quality metric used for generating the observation.

" } }, + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Statistic ID.

" + } + }, "MetricValues": { "target": "com.amazonaws.glue#DataQualityMetricValues", "traits": { @@ -28077,6 +28692,59 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#PutDataQualityProfileAnnotation": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#PutDataQualityProfileAnnotationRequest" + }, + "output": { + "target": "com.amazonaws.glue#PutDataQualityProfileAnnotationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "

Annotate all datapoints for a Profile.

" + } + }, + "com.amazonaws.glue#PutDataQualityProfileAnnotationRequest": { + "type": "structure", + "members": { + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The ID of the data quality monitoring profile to annotate.

", + "smithy.api#required": {} + } + }, + "InclusionAnnotation": { + "target": "com.amazonaws.glue#InclusionAnnotationValue", + "traits": { + "smithy.api#documentation": "

The inclusion annotation value to apply to the profile.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#PutDataQualityProfileAnnotationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

Left blank.

", + "smithy.api#output": {} + } + }, "com.amazonaws.glue#PutResourcePolicy": { "type": "operation", "input": { @@ -28785,6 +29453,12 @@ "smithy.api#documentation": "

Specifies a target that uses Amazon Redshift.

" } }, + "com.amazonaws.glue#ReferenceDatasetsList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#NameString" + } + }, "com.amazonaws.glue#RegisterSchemaVersion": { "type": "operation", "input": { @@ -29197,6 +29871,23 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#ResourceAction": { + "type": "enum", + "members": { + "UPDATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATE" + } + }, + "CREATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATE" + } + } + } + }, "com.amazonaws.glue#ResourceNotReadyException": { "type": "structure", "members": { @@ -29250,6 +29941,41 @@ } } }, + "com.amazonaws.glue#ResourceState": { + "type": "enum", + "members": { + "QUEUED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUEUED" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCESS" + } + }, + "STOPPED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, "com.amazonaws.glue#ResourceType": { "type": "enum", "members": { @@ -29416,6 +30142,26 @@ "com.amazonaws.glue#RunId": { "type": "string" }, + "com.amazonaws.glue#RunIdentifier": { + "type": "structure", + "members": { + "RunId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Run ID.

" + } + }, + "JobRunId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Job Run ID.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A run identifier.

" + } + }, "com.amazonaws.glue#RunMetrics": { "type": "structure", "members": { @@ -31119,6 +31865,12 @@ "traits": { "smithy.api#documentation": "

Allows you to specify that you want to search the tables shared with your account. The allowable values are FOREIGN or ALL.

\n
    \n
  • \n

    If set to FOREIGN, will search the tables shared with your account.

    \n
  • \n
  • \n

    If set to ALL, will search the tables shared with your account, as well as the tables in your local account.

    \n
  • \n
" } + }, + "IncludeStatusDetails": { + "target": "com.amazonaws.glue#BooleanNullable", + "traits": { + "smithy.api#documentation": "

Specifies whether to include status details related to a request to create or update an Glue Data Catalog view.

" + } } }, "traits": { @@ -32507,6 +33259,12 @@ "smithy.api#documentation": "

A name for the ruleset.

" } }, + "DataQualitySecurityConfiguration": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the security configuration created with the data quality encryption option.

" + } + }, "ClientToken": { "target": "com.amazonaws.glue#HashString", "traits": { @@ -33357,6 +34115,237 @@ } } }, + "com.amazonaws.glue#StatisticAnnotation": { + "type": "structure", + "members": { + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Profile ID.

" + } + }, + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Statistic ID.

" + } + }, + "StatisticRecordedOn": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp when the annotated statistic was recorded.

" + } + }, + "InclusionAnnotation": { + "target": "com.amazonaws.glue#TimestampedInclusionAnnotation", + "traits": { + "smithy.api#documentation": "

The inclusion annotation applied to the statistic.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A Statistic Annotation.

" + } + }, + "com.amazonaws.glue#StatisticEvaluationLevel": { + "type": "enum", + "members": { + "DATASET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Dataset" + } + }, + "COLUMN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Column" + } + }, + "MULTICOLUMN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Multicolumn" + } + } + } + }, + "com.amazonaws.glue#StatisticModelResult": { + "type": "structure", + "members": { + "LowerBound": { + "target": "com.amazonaws.glue#NullableDouble", + "traits": { + "smithy.api#documentation": "

The lower bound.

" + } + }, + "UpperBound": { + "target": "com.amazonaws.glue#NullableDouble", + "traits": { + "smithy.api#documentation": "

The upper bound.

" + } + }, + "PredictedValue": { + "target": "com.amazonaws.glue#NullableDouble", + "traits": { + "smithy.api#documentation": "

The predicted value.

" + } + }, + "ActualValue": { + "target": "com.amazonaws.glue#NullableDouble", + "traits": { + "smithy.api#documentation": "

The actual value.

" + } + }, + "Date": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

The date.

" + } + }, + "InclusionAnnotation": { + "target": "com.amazonaws.glue#InclusionAnnotationValue", + "traits": { + "smithy.api#documentation": "

The inclusion annotation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The statistic model result.

" + } + }, + "com.amazonaws.glue#StatisticModelResults": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#StatisticModelResult" + } + }, + "com.amazonaws.glue#StatisticNameString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[A-Z][A-Za-z\\.]+$" + } + }, + "com.amazonaws.glue#StatisticPropertiesMap": { + "type": "map", + "key": { + "target": "com.amazonaws.glue#NameString" + }, + "value": { + "target": "com.amazonaws.glue#DescriptionString" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.glue#StatisticSummary": { + "type": "structure", + "members": { + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Statistic ID.

" + } + }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "

The Profile ID.

" + } + }, + "RunIdentifier": { + "target": "com.amazonaws.glue#RunIdentifier", + "traits": { + "smithy.api#documentation": "

The Run Identifier.

" + } + }, + "StatisticName": { + "target": "com.amazonaws.glue#StatisticNameString", + "traits": { + "smithy.api#documentation": "

The name of the statistic.

" + } + }, + "DoubleValue": { + "target": "com.amazonaws.glue#Double", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The value of the statistic.

" + } + }, + "EvaluationLevel": { + "target": "com.amazonaws.glue#StatisticEvaluationLevel", + "traits": { + "smithy.api#documentation": "

The evaluation level of the statistic. Possible values: Dataset, Column, Multicolumn.

" + } + }, + "ColumnsReferenced": { + "target": "com.amazonaws.glue#ColumnNameList", + "traits": { + "smithy.api#documentation": "

The list of columns referenced by the statistic.

" + } + }, + "ReferencedDatasets": { + "target": "com.amazonaws.glue#ReferenceDatasetsList", + "traits": { + "smithy.api#documentation": "

The list of datasets referenced by the statistic.

" + } + }, + "StatisticProperties": { + "target": "com.amazonaws.glue#StatisticPropertiesMap", + "traits": { + "smithy.api#documentation": "

A StatisticPropertiesMap, which contains a NameString and DescriptionString\n

" + } + }, + "RecordedOn": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp when the statistic was recorded.

" + } + }, + "InclusionAnnotation": { + "target": "com.amazonaws.glue#TimestampedInclusionAnnotation", + "traits": { + "smithy.api#documentation": "

The inclusion annotation for the statistic.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary information about a statistic.

" + } + }, + "com.amazonaws.glue#StatisticSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#StatisticSummary" + }, + "traits": { + "smithy.api#documentation": "

A list of StatisticSummary.

" + } + }, + "com.amazonaws.glue#StatusDetails": { + "type": "structure", + "members": { + "RequestedChange": { + "target": "com.amazonaws.glue#Table", + "traits": { + "smithy.api#documentation": "

A Table object representing the requested changes.

" + } + }, + "ViewValidations": { + "target": "com.amazonaws.glue#ViewValidationList", + "traits": { + "smithy.api#documentation": "

A list of ViewValidation objects that contain information for an analytical engine to validate a view.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing information about an asynchronous change to a table.

" + } + }, "com.amazonaws.glue#StopColumnStatisticsTaskRun": { "type": "operation", "input": { @@ -34015,12 +35004,38 @@ "traits": { "smithy.api#documentation": "

Specifies whether the view supports the SQL dialects of one or more different query engines and can therefore be read by those engines.

" } + }, + "Status": { + "target": "com.amazonaws.glue#TableStatus" } }, "traits": { "smithy.api#documentation": "

Represents a collection of related data organized in columns and rows.

" } }, + "com.amazonaws.glue#TableAttributes": { + "type": "enum", + "members": { + "NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NAME" + } + }, + "TABLE_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TABLE_TYPE" + } + } + } + }, + "com.amazonaws.glue#TableAttributesList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#TableAttributes" + } + }, "com.amazonaws.glue#TableError": { "type": "structure", "members": { @@ -34324,6 +35339,62 @@ } } }, + "com.amazonaws.glue#TableStatus": { + "type": "structure", + "members": { + "RequestedBy": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The ARN of the user who requested the asynchronous change.

" + } + }, + "UpdatedBy": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The ARN of the user to last manually alter the asynchronous change (requesting cancellation, etc).

" + } + }, + "RequestTime": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

An ISO 8601 formatted date string indicating the time that the change was initiated.

" + } + }, + "UpdateTime": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

An ISO 8601 formatted date string indicating the time that the state was last updated.

" + } + }, + "Action": { + "target": "com.amazonaws.glue#ResourceAction", + "traits": { + "smithy.api#documentation": "

Indicates which action was called on the table, currently only CREATE or UPDATE.

" + } + }, + "State": { + "target": "com.amazonaws.glue#ResourceState", + "traits": { + "smithy.api#documentation": "

A generic status for the change in progress, such as QUEUED, IN_PROGRESS, SUCCESS, or FAILED.

" + } + }, + "Error": { + "target": "com.amazonaws.glue#ErrorDetail", + "traits": { + "smithy.api#documentation": "

An error that will only appear when the state is \"FAILED\". This is a parent level exception message, there may be different Errors for each dialect.

" + } + }, + "Details": { + "target": "com.amazonaws.glue#StatusDetails", + "traits": { + "smithy.api#documentation": "

A StatusDetails object with information about the requested change.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing information about the state of an asynchronous change to a table.

" + } + }, "com.amazonaws.glue#TableTypeString": { "type": "string", "traits": { @@ -34824,9 +35895,49 @@ "com.amazonaws.glue#Timestamp": { "type": "timestamp" }, + "com.amazonaws.glue#TimestampFilter": { + "type": "structure", + "members": { + "RecordedBefore": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp before which statistics should be included in the results.

" + } + }, + "RecordedAfter": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp after which statistics should be included in the results.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A timestamp filter.

" + } + }, "com.amazonaws.glue#TimestampValue": { "type": "timestamp" }, + "com.amazonaws.glue#TimestampedInclusionAnnotation": { + "type": "structure", + "members": { + "Value": { + "target": "com.amazonaws.glue#InclusionAnnotationValue", + "traits": { + "smithy.api#documentation": "

The inclusion annotation value.

" + } + }, + "LastModifiedOn": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp when the inclusion annotation was last modified.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A timestamped inclusion annotation.

" + } + }, "com.amazonaws.glue#Token": { "type": "string" }, @@ -38171,6 +39282,56 @@ } } }, + "com.amazonaws.glue#ViewValidation": { + "type": "structure", + "members": { + "Dialect": { + "target": "com.amazonaws.glue#ViewDialect", + "traits": { + "smithy.api#documentation": "

The dialect of the query engine.

" + } + }, + "DialectVersion": { + "target": "com.amazonaws.glue#ViewDialectVersionString", + "traits": { + "smithy.api#documentation": "

The version of the dialect of the query engine. For example, 3.0.0.

" + } + }, + "ViewValidationText": { + "target": "com.amazonaws.glue#ViewTextString", + "traits": { + "smithy.api#documentation": "

The SELECT query that defines the view, as provided by the customer.

" + } + }, + "UpdateTime": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

The time of the last update.

" + } + }, + "State": { + "target": "com.amazonaws.glue#ResourceState", + "traits": { + "smithy.api#documentation": "

The state of the validation.

" + } + }, + "Error": { + "target": "com.amazonaws.glue#ErrorDetail", + "traits": { + "smithy.api#documentation": "

An error associated with the validation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that contains information for an analytical engine to validate a view, prior to persisting the view metadata. Used in the case of direct UpdateTable or CreateTable API calls.

" + } + }, + "com.amazonaws.glue#ViewValidationList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#ViewValidation" + } + }, "com.amazonaws.glue#WorkerType": { "type": "enum", "members": { diff --git a/models/groundstation.json b/models/groundstation.json index fbdf8a4860..fe43bbc870 100644 --- a/models/groundstation.json +++ b/models/groundstation.json @@ -4979,7 +4979,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Ephemeris data in Orbit Ephemeris Message (OEM) format.\n

\n

\n Position, velocity, and acceleration units must be represented in km, km/s, and\n km/s**2, respectively, in ephemeris data lines. Covariance matrix line units must be\n represented in km**2 if computed from two positions, km**2/s if computed from one\n position and one velocity, and km**2/s**2 if computed from two velocities. Consult section\n 7.7.2 of The Consultative Committee for Space Data Systems (CCSDS)\n Recommended Standard for Orbit Data Messages\n for more information.\n

" + "smithy.api#documentation": "

\n Ephemeris data in Orbit Ephemeris Message (OEM) format.\n

\n

\n AWS Ground Station processes OEM Customer Provided Ephemerides according to the CCSDS standard with some extra restrictions. OEM files should be in KVN format. For more detail about the OEM format that AWS Ground Station supports, see OEM ephemeris format in the AWS Ground Station user guide.\n

" } }, "com.amazonaws.groundstation#PaginationMaxResults": { diff --git a/models/iam.json b/models/iam.json index b03777a860..35d29c7874 100644 --- a/models/iam.json +++ b/models/iam.json @@ -1995,8 +1995,7 @@ "LastUsedDate": { "target": "com.amazonaws.iam#dateType", "traits": { - "smithy.api#documentation": "

The date and time, in ISO 8601 date-time\n format, when the access key was most recently used. This field is null in the\n following situations:

\n
    \n
  • \n

    The user does not have an access key.

    \n
  • \n
  • \n

    An access key exists but has not been used since IAM began tracking this\n information.

    \n
  • \n
  • \n

    There is no sign-in data associated with the user.

    \n
  • \n
", - "smithy.api#required": {} + "smithy.api#documentation": "

The date and time, in ISO 8601 date-time\n format, when the access key was most recently used. This field is null in the\n following situations:

\n
    \n
  • \n

    The user does not have an access key.

    \n
  • \n
  • \n

    An access key exists but has not been used since IAM began tracking this\n information.

    \n
  • \n
  • \n

    There is no sign-in data associated with the user.

    \n
  • \n
" } }, "ServiceName": { @@ -3140,7 +3139,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

\n

The OIDC provider that you create with this operation can be used as a principal in a\n role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and\n the OIDC provider.

\n

If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't\n need to create a separate IAM identity provider. These OIDC identity providers are\n already built-in to Amazon Web Services and are available for your use. Instead, you can move directly\n to creating new roles using your identity provider. To learn more, see Creating\n a role for web identity or OpenID connect federation in the IAM\n User Guide.

\n

When you create the IAM OIDC provider, you specify the following:

\n
    \n
  • \n

    The URL of the OIDC identity provider (IdP) to trust

    \n
  • \n
  • \n

    A list of client IDs (also known as audiences) that identify the application\n or applications allowed to authenticate using the OIDC provider

    \n
  • \n
  • \n

    A list of tags that are attached to the specified IAM OIDC provider

    \n
  • \n
  • \n

    A list of thumbprints of one or more server certificates that the IdP\n uses

    \n
  • \n
\n

You get all of this information from the OIDC IdP you want to use to access\n Amazon Web Services.

\n \n

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library\n of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to\n verify your IdP server certificate. In these cases, your legacy thumbprint remains in your\n configuration, but is no longer used for validation. These OIDC IdPs include Auth0, GitHub,\n GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS)\n endpoint.

\n
\n \n

The trust for the OIDC provider is derived from the IAM provider that this\n operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged\n users.

\n
", + "smithy.api#documentation": "

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

\n

The OIDC provider that you create with this operation can be used as a principal in a\n role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and\n the OIDC provider.

\n

If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't\n need to create a separate IAM identity provider. These OIDC identity providers are\n already built-in to Amazon Web Services and are available for your use. Instead, you can move directly\n to creating new roles using your identity provider. To learn more, see Creating\n a role for web identity or OpenID connect federation in the IAM\n User Guide.

\n

When you create the IAM OIDC provider, you specify the following:

\n
    \n
  • \n

    The URL of the OIDC identity provider (IdP) to trust

    \n
  • \n
  • \n

    A list of client IDs (also known as audiences) that identify the application\n or applications allowed to authenticate using the OIDC provider

    \n
  • \n
  • \n

    A list of tags that are attached to the specified IAM OIDC provider

    \n
  • \n
  • \n

    A list of thumbprints of one or more server certificates that the IdP\n uses

    \n
  • \n
\n

You get all of this information from the OIDC IdP you want to use to access\n Amazon Web Services.

\n \n

Amazon Web Services secures communication with OIDC identity providers (IdPs) using our library of\n trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS)\n endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed\n by one of these trusted CAs, only then we secure communication using the thumbprints set\n in the IdP's configuration.

\n
\n \n

The trust for the OIDC provider is derived from the IAM provider that this\n operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged\n users.

\n
", "smithy.api#examples": [ { "title": "To create an instance profile", @@ -7696,6 +7695,21 @@ "smithy.api#suppress": [ "WaitableTraitInvalidErrorType" ], + "smithy.test#smokeTests": [ + { + "id": "GetUserFailure", + "params": { + "UserName": "fake_user" + }, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ], "smithy.waiters#waitable": { "UserExists": { "acceptors": [ @@ -8210,7 +8224,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the account alias associated with the Amazon Web Services account (Note: you can have only\n one). For information about using an Amazon Web Services account alias, see Creating,\n deleting, and listing an Amazon Web Services account alias in the Amazon Web Services Sign-In\n User Guide.

", + "smithy.api#documentation": "

Lists the account alias associated with the Amazon Web Services account (Note: you can have only\n one). For information about using an Amazon Web Services account alias, see Creating,\n deleting, and listing an Amazon Web Services account alias in the\n IAM User Guide.

", "smithy.api#examples": [ { "title": "To list account aliases", @@ -10918,7 +10932,20 @@ "outputToken": "Marker", "items": "Users", "pageSize": "MaxItems" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListUsersSuccess", + "params": {}, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.iam#ListUsersRequest": { @@ -11300,7 +11327,7 @@ "code": "OpenIdIdpCommunicationError", "httpResponseCode": 400 }, - "smithy.api#documentation": "

The request failed because IAM cannot connect to the OpenID Connect identity provider URL.

", + "smithy.api#documentation": "

The request failed because IAM cannot connect to the OpenID Connect identity provider\n URL.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -14918,7 +14945,7 @@ } ], "traits": { - "smithy.api#documentation": "

Replaces the existing list of server certificate thumbprints associated with an OpenID\n Connect (OIDC) provider resource object with a new list of thumbprints.

\n

The list that you pass with this operation completely replaces the existing list of\n thumbprints. (The lists are not merged.)

\n

Typically, you need to update a thumbprint only when the identity provider certificate\n changes, which occurs rarely. However, if the provider's certificate\n does change, any attempt to assume an IAM role that specifies\n the OIDC provider as a principal fails until the certificate thumbprint is\n updated.

\n \n

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library\n of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to\n verify your IdP server certificate. In these cases, your legacy thumbprint remains in your\n configuration, but is no longer used for validation. These OIDC IdPs include Auth0, GitHub,\n GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS)\n endpoint.

\n
\n \n

Trust for the OIDC provider is derived from the provider certificate and is\n validated by the thumbprint. Therefore, it is best to limit access to the\n UpdateOpenIDConnectProviderThumbprint operation to highly\n privileged users.

\n
" + "smithy.api#documentation": "

Replaces the existing list of server certificate thumbprints associated with an OpenID\n Connect (OIDC) provider resource object with a new list of thumbprints.

\n

The list that you pass with this operation completely replaces the existing list of\n thumbprints. (The lists are not merged.)

\n

Typically, you need to update a thumbprint only when the identity provider certificate\n changes, which occurs rarely. However, if the provider's certificate\n does change, any attempt to assume an IAM role that specifies\n the OIDC provider as a principal fails until the certificate thumbprint is\n updated.

\n \n

Amazon Web Services secures communication with OIDC identity providers (IdPs) using our library of\n trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS)\n endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed\n by one of these trusted CAs, only then we secure communication using the thumbprints set\n in the IdP's configuration.

\n
\n \n

Trust for the OIDC provider is derived from the provider certificate and is\n validated by the thumbprint. Therefore, it is best to limit access to the\n UpdateOpenIDConnectProviderThumbprint operation to highly\n privileged users.

\n
" } }, "com.amazonaws.iam#UpdateOpenIDConnectProviderThumbprintRequest": { diff --git a/models/inspector2.json b/models/inspector2.json index 3efe5ed0c9..ced6d68a6f 100644 --- a/models/inspector2.json +++ b/models/inspector2.json @@ -14004,7 +14004,7 @@ "com.amazonaws.inspector2#TagValueList": { "type": "list", "member": { - "target": "smithy.api#String" + "target": "com.amazonaws.inspector2#TargetResourceTagsValue" }, "traits": { "smithy.api#length": { @@ -14044,7 +14044,7 @@ "com.amazonaws.inspector2#TargetResourceTags": { "type": "map", "key": { - "target": "com.amazonaws.inspector2#NonEmptyString" + "target": "com.amazonaws.inspector2#TargetResourceTagsKey" }, "value": { "target": "com.amazonaws.inspector2#TagValueList" @@ -14056,6 +14056,25 @@ } } }, + "com.amazonaws.inspector2#TargetResourceTagsKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[\\p{L}\\p{Z}\\p{N}_.:/=\\-@]*$" + } + }, + "com.amazonaws.inspector2#TargetResourceTagsValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, "com.amazonaws.inspector2#TargetStatusFilterList": { "type": "list", "member": { diff --git a/models/iotsitewise.json b/models/iotsitewise.json index f890604ec6..08b07a73f0 100644 --- a/models/iotsitewise.json +++ b/models/iotsitewise.json @@ -1489,7 +1489,7 @@ "id": { "target": "com.amazonaws.iotsitewise#ID", "traits": { - "smithy.api#documentation": "

\n The ID of the asset composite model.\n

" + "smithy.api#documentation": "

The ID of the asset composite model.

" } }, "externalId": { @@ -1548,7 +1548,7 @@ "externalId": { "target": "com.amazonaws.iotsitewise#ExternalId", "traits": { - "smithy.api#documentation": "

An external ID to assign to the asset model.

\n

If the composite model is a derived composite model, or one nested inside a component model, you can only set the external ID using \n UpdateAssetModelCompositeModel and specifying the derived ID of the model or property from the created model it's a part of.

" + "smithy.api#documentation": "

An external ID to assign to the asset model.

\n

If the composite model is a derived composite model, or one nested inside a component\n model, you can only set the external ID using UpdateAssetModelCompositeModel and\n specifying the derived ID of the model or property from the created model it's a part\n of.

" } }, "name": { @@ -1658,7 +1658,7 @@ "externalId": { "target": "com.amazonaws.iotsitewise#ExternalId", "traits": { - "smithy.api#documentation": "

The external ID of the hierarchy, if it has one. When you update an asset hierarchy,\n you may assign an external ID if it doesn't already have one. You can't change the external ID\n of an asset hierarchy that already has one. For more information, see Using external IDs in the IoT SiteWise User Guide.

" + "smithy.api#documentation": "

The external ID of the hierarchy, if it has one. When you update an asset hierarchy, you\n may assign an external ID if it doesn't already have one. You can't change the external ID of\n an asset hierarchy that already has one. For more information, see Using external IDs in the IoT SiteWise User Guide.

" } } }, @@ -1724,7 +1724,7 @@ "id": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "

\n The ID of the asset model composite model.\n

" + "smithy.api#documentation": "

The ID of the asset model composite model.

" } }, "externalId": { @@ -1828,7 +1828,7 @@ "id": { "target": "com.amazonaws.iotsitewise#ID", "traits": { - "smithy.api#documentation": "

The ID of the the composite model that this summary describes..

", + "smithy.api#documentation": "

The ID of the composite model that this summary describes.

", "smithy.api#required": {} } }, @@ -1841,21 +1841,21 @@ "name": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "

The name of the the composite model that this summary describes..

", + "smithy.api#documentation": "

The name of the composite model that this summary describes.

", "smithy.api#required": {} } }, "type": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "

The type of asset model.

\n
    \n
  • \n

    \n ASSET_MODEL – (default) An asset model that you can use to create assets.\n Can't be included as a component in another asset model.

    \n
  • \n
  • \n

    \n COMPONENT_MODEL – A reusable component that you can include in the composite\n models of other asset models. You can't create assets directly from this type of asset model.

    \n
  • \n
", + "smithy.api#documentation": "

The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY.

", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.iotsitewise#Description", "traits": { - "smithy.api#documentation": "

The description of the the composite model that this summary describes..

" + "smithy.api#documentation": "

The description of the composite model that this summary describes.

" } }, "path": { @@ -1887,7 +1887,7 @@ "id": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "

The ID of the asset model hierarchy. This ID is a hierarchyId.

\n
    \n
  • \n

    If you are callling UpdateAssetModel\n to create a new hierarchy: You can specify its ID here, if desired.\n IoT SiteWise automatically generates a unique ID for you, so this parameter is never required.\n However, if you prefer to supply your own ID instead, you can specify it here in UUID format.\n If you specify your own ID, it must be globally unique.

    \n
  • \n
  • \n

    If you are calling UpdateAssetModel to modify an existing\n hierarchy: This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

The ID of the asset model hierarchy. This ID is a hierarchyId.

\n
    \n
  • \n

    If you are calling UpdateAssetModel to create a\n new hierarchy: You can specify its ID here, if desired.\n IoT SiteWise automatically generates a unique ID for you, so this parameter is never required.\n However, if you prefer to supply your own ID instead, you can specify it here in UUID format.\n If you specify your own ID, it must be globally unique.

    \n
  • \n
  • \n

    If you are calling UpdateAssetModel to modify an existing\n hierarchy: This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.

    \n
  • \n
" } }, "name": { @@ -1900,7 +1900,7 @@ "childAssetModelId": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "

The ID of the asset model, in UUID format. All assets in this hierarchy must be instances of the\n childAssetModelId asset model. IoT SiteWise will always return the actual\n asset model ID for this value. However, when you are specifying this value as part of a call to\n UpdateAssetModel, you may provide either the asset model ID or else externalId:\n followed by the asset model's external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.\n

", + "smithy.api#documentation": "

The ID of the asset model, in UUID format. All assets in this hierarchy must be instances of the\n childAssetModelId asset model. IoT SiteWise will always return the actual\n asset model ID for this value. However, when you are specifying this value as part of a call to\n UpdateAssetModel, you may provide either the asset model ID or else externalId:\n followed by the asset model's external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.\n

", "smithy.api#required": {} } }, @@ -1941,7 +1941,7 @@ "externalId": { "target": "com.amazonaws.iotsitewise#ExternalId", "traits": { - "smithy.api#documentation": "

An external ID to assign to the asset model hierarchy. The external ID must be unique among\n asset model hierarchies within this asset model. For more information, see Using external IDs in the IoT SiteWise User Guide.

" + "smithy.api#documentation": "

An external ID to assign to the asset model hierarchy. The external ID must be unique\n among asset model hierarchies within this asset model. For more information, see Using external IDs in the IoT SiteWise User Guide.

" } } }, @@ -1967,7 +1967,7 @@ "id": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "

The ID of the asset model property.

\n
    \n
  • \n

    If you are callling UpdateAssetModel\n to create a new property: You can specify its ID here, if desired.\n IoT SiteWise automatically generates a unique ID for you, so this parameter is never required.\n However, if you prefer to supply your own ID instead, you can specify it here in UUID format.\n If you specify your own ID, it must be globally unique.

    \n
  • \n
  • \n

    If you are calling UpdateAssetModel to modify an existing\n property: This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

The ID of the asset model property.

\n
    \n
  • \n

    If you are calling UpdateAssetModel to create a\n new property: You can specify its ID here, if desired.\n IoT SiteWise automatically generates a unique ID for you, so this parameter is never required.\n However, if you prefer to supply your own ID instead, you can specify it here in UUID format.\n If you specify your own ID, it must be globally unique.

    \n
  • \n
  • \n

    If you are calling UpdateAssetModel to modify an existing\n property: This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.

    \n
  • \n
" } }, "name": { @@ -2155,7 +2155,7 @@ "assetModelCompositeModelId": { "target": "com.amazonaws.iotsitewise#ID", "traits": { - "smithy.api#documentation": "

\n The ID of the composite model that contains the asset model property.\n

" + "smithy.api#documentation": "

The ID of the composite model that contains the asset model property.

" } }, "path": { @@ -2461,7 +2461,7 @@ "unit": { "target": "com.amazonaws.iotsitewise#PropertyUnit", "traits": { - "smithy.api#documentation": "

\n The unit of measure (such as Newtons or RPM) of the asset property.\n

" + "smithy.api#documentation": "

The unit of measure (such as Newtons or RPM) of the asset property.

" } }, "notification": { @@ -2470,7 +2470,7 @@ "assetCompositeModelId": { "target": "com.amazonaws.iotsitewise#ID", "traits": { - "smithy.api#documentation": "

\n The ID of the composite model that contains the asset property.\n

" + "smithy.api#documentation": "

The ID of the composite model that contains the asset property.

" } }, "path": { @@ -2763,7 +2763,7 @@ "hierarchyId": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "

The ID of a hierarchy in the parent asset's model. (This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) Hierarchies allow different groupings\n of assets to be formed that all come from the same asset model. For more information, see\n Asset hierarchies in the IoT SiteWise User Guide.

", + "smithy.api#documentation": "

The ID of a hierarchy in the parent asset's model. (This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) Hierarchies allow\n different groupings of assets to be formed that all come from the same asset model. For more\n information, see Asset hierarchies in the IoT SiteWise User Guide.

", "smithy.api#required": {} } }, @@ -3388,7 +3388,7 @@ "maxResults": { "target": "com.amazonaws.iotsitewise#BatchGetAssetPropertyAggregatesMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.

\n
    \n
  • \n

    The size of the result set is equal to 1 MB.

    \n
  • \n
  • \n

    The number of data points in the result set is equal to the value of\n maxResults. The maximum value of maxResults is 4000.

    \n
  • \n
" + "smithy.api#documentation": "

The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.

\n
    \n
  • \n

    The size of the result set is equal to 1 MB.

    \n
  • \n
  • \n

    The number of data points in the result set is equal to the value of\n maxResults. The maximum value of maxResults is 4000.

    \n
  • \n
" } } }, @@ -4287,7 +4287,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 104857600 + "max": 10000000 } } }, @@ -4327,6 +4327,12 @@ "traits": { "smithy.api#enumValue": "UNKNOWN" } + }, + "NOT_APPLICABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_APPLICABLE" + } } } }, @@ -4465,7 +4471,7 @@ "id": { "target": "com.amazonaws.iotsitewise#ID", "traits": { - "smithy.api#documentation": "

\n The ID of the composite model that contains the property.\n

" + "smithy.api#documentation": "

The ID of the composite model that contains the property.

" } }, "externalId": { @@ -4490,7 +4496,7 @@ } }, "traits": { - "smithy.api#documentation": "

Metadata for the composition relationship established by using composedAssetModelId in \n CreateAssetModelCompositeModel\n .

" + "smithy.api#documentation": "

Metadata for the composition relationship established by using\n composedAssetModelId in \n CreateAssetModelCompositeModel\n .

" } }, "com.amazonaws.iotsitewise#CompositionRelationship": { @@ -4510,7 +4516,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents a composite model that composed an asset model of type COMPONENT_MODEL.

" + "smithy.api#documentation": "

Represents a composite model that composed an asset model of type\n COMPONENT_MODEL.

" } }, "com.amazonaws.iotsitewise#CompositionRelationshipSummaries": { @@ -4539,7 +4545,7 @@ "assetModelCompositeModelType": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "

The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY.

", + "smithy.api#documentation": "

The composite model type. Valid values are AWS/ALARM, CUSTOM, or\n AWS/L4E_ANOMALY.

", "smithy.api#required": {} } } @@ -4667,7 +4673,8 @@ "smithy.api#length": { "min": 1, "max": 128 - } + }, + "smithy.api#pattern": "^[a-zA-Z0-9:_-]+$" } }, "com.amazonaws.iotsitewise#CreateAccessPolicy": { @@ -4846,7 +4853,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an asset model from specified property and hierarchy definitions. You create\n assets from asset models. With asset models, you can easily create assets of the same type\n that have standardized definitions. Each asset created from a model inherits the asset model's\n property and hierarchy definitions. For more information, see Defining asset models in the\n IoT SiteWise User Guide.

\n

You can create two types of asset models, ASSET_MODEL or COMPONENT_MODEL.

\n
    \n
  • \n

    \n ASSET_MODEL – (default) An asset model that you can use to create assets.\n Can't be included as a component in another asset model.

    \n
  • \n
  • \n

    \n COMPONENT_MODEL – A reusable component that you can include in the composite\n models of other asset models. You can't create assets directly from this type of asset model.

    \n
  • \n
", + "smithy.api#documentation": "

Creates an asset model from specified property and hierarchy definitions. You create\n assets from asset models. With asset models, you can easily create assets of the same type\n that have standardized definitions. Each asset created from a model inherits the asset model's\n property and hierarchy definitions. For more information, see Defining asset models in the\n IoT SiteWise User Guide.

\n

You can create two types of asset models, ASSET_MODEL or\n COMPONENT_MODEL.

\n
    \n
  • \n

    \n ASSET_MODEL – (default) An asset model that\n you can use to create assets. Can't be included as a component in another asset\n model.

    \n
  • \n
  • \n

    \n COMPONENT_MODEL – A reusable component that\n you can include in the composite models of other asset models. You can't create\n assets directly from this type of asset model.

    \n
  • \n
", "smithy.api#endpoint": { "hostPrefix": "api." }, @@ -4889,7 +4896,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a custom composite model from specified property and hierarchy definitions. There are two types of custom composite models,\n inline and component-model-based.

\n

Use component-model-based custom composite models to define standard, reusable components. A component-model-based custom composite model consists of a name,\n a description, and the ID of the component model it references. A component-model-based custom composite model has no properties of its own; its referenced\n component model provides its associated properties to any created assets. For more information, see\n Custom composite models (Components) in the\n IoT SiteWise User Guide.

\n

Use inline custom composite models to organize the properties of an asset model. The properties of inline custom composite models are local to the asset model where they are\n included and can't be used to create multiple assets.

\n

To create a component-model-based model, specify the composedAssetModelId of an existing asset model with assetModelType of COMPONENT_MODEL.

\n

To create an inline model, specify the assetModelCompositeModelProperties and don't include an composedAssetModelId.

", + "smithy.api#documentation": "

Creates a custom composite model from specified property and hierarchy definitions. There\n are two types of custom composite models, inline and\n component-model-based.

\n

Use component-model-based custom composite models to define standard, reusable components.\n A component-model-based custom composite model consists of a name, a description, and the ID\n of the component model it references. A component-model-based custom composite model has no\n properties of its own; its referenced component model provides its associated properties to\n any created assets. For more information, see Custom composite models (Components)\n in the IoT SiteWise User Guide.

\n

Use inline custom composite models to organize the properties of an asset model. The\n properties of inline custom composite models are local to the asset model where they are\n included and can't be used to create multiple assets.

\n

To create a component-model-based model, specify the composedAssetModelId of\n an existing asset model with assetModelType of\n COMPONENT_MODEL.

\n

To create an inline model, specify the assetModelCompositeModelProperties and\n don't include an composedAssetModelId.

", "smithy.api#endpoint": { "hostPrefix": "api." }, @@ -4920,13 +4927,13 @@ "assetModelCompositeModelExternalId": { "target": "com.amazonaws.iotsitewise#ExternalId", "traits": { - "smithy.api#documentation": "

An external ID to assign to the composite model.

\n

If the composite model is a derived composite model, or one nested inside a component model, you can only set the external ID using \n UpdateAssetModelCompositeModel and specifying the derived ID of the model or property from the created model it's a part of.

" + "smithy.api#documentation": "

An external ID to assign to the composite model.

\n

If the composite model is a derived composite model, or one nested inside a component\n model, you can only set the external ID using UpdateAssetModelCompositeModel and\n specifying the derived ID of the model or property from the created model it's a part\n of.

" } }, "assetModelCompositeModelId": { "target": "com.amazonaws.iotsitewise#ID", "traits": { - "smithy.api#documentation": "

The ID of the composite model. IoT SiteWise automatically generates a unique ID for you, so this parameter is never required. However, \n if you prefer to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique.

" + "smithy.api#documentation": "

The ID of the composite model. IoT SiteWise automatically generates a unique ID for you, so this\n parameter is never required. However, if you prefer to supply your own ID instead, you can\n specify it here in UUID format. If you specify your own ID, it must be globally unique.

" } }, "assetModelCompositeModelDescription": { @@ -4938,7 +4945,7 @@ "assetModelCompositeModelName": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "

A unique, friendly name for the composite model.

", + "smithy.api#documentation": "

A unique name for the composite model.

", "smithy.api#required": {} } }, @@ -4959,13 +4966,13 @@ "composedAssetModelId": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "

The ID of a composite model on this asset.

" + "smithy.api#documentation": "

The ID of a component model which is reused to create this composite model.

" } }, "assetModelCompositeModelProperties": { "target": "com.amazonaws.iotsitewise#AssetModelPropertyDefinitions", "traits": { - "smithy.api#documentation": "

The property definitions of the composite model. For more information, see .

\n

You can specify up to 200 properties per composite model. For more\n information, see Quotas in the IoT SiteWise User Guide.

" + "smithy.api#documentation": "

The property definitions of the composite model. For more information, see \n Inline custom composite models in the IoT SiteWise User Guide.

\n

You can specify up to 200 properties per composite model. For more\n information, see Quotas in the IoT SiteWise User Guide.

" } } }, @@ -4979,7 +4986,7 @@ "assetModelCompositeModelId": { "target": "com.amazonaws.iotsitewise#ID", "traits": { - "smithy.api#documentation": "

The ID of the composed asset model. You can use this ID when you call other IoT SiteWise APIs.

", + "smithy.api#documentation": "

The ID of the composed asset model. You can use this ID when you call other IoT SiteWise\n APIs.

", "smithy.api#required": {} } }, @@ -5007,7 +5014,7 @@ "assetModelName": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "

A unique, friendly name for the asset model.

", + "smithy.api#documentation": "

A unique name for the asset model.

", "smithy.api#required": {} } }, @@ -5032,7 +5039,7 @@ "assetModelCompositeModels": { "target": "com.amazonaws.iotsitewise#AssetModelCompositeModelDefinitions", "traits": { - "smithy.api#documentation": "

The composite models that are part of this asset model. It groups properties\n (such as attributes, measurements, transforms, and metrics) and child composite models that\n model parts of your industrial equipment. Each composite model has a type that defines the\n properties that the composite model supports. Use composite models to define alarms on this asset model.

\n \n

When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information,\n see .

\n
" + "smithy.api#documentation": "

The composite models that are part of this asset model. It groups properties\n (such as attributes, measurements, transforms, and metrics) and child composite models that\n model parts of your industrial equipment. Each composite model has a type that defines the\n properties that the composite model supports. Use composite models to define alarms on this asset model.

\n \n

When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information,\n see Creating custom composite models (Components) in the\n IoT SiteWise User Guide.

\n
" } }, "clientToken": { @@ -5214,7 +5221,7 @@ } ], "traits": { - "smithy.api#documentation": "

Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, see Create a\n bulk import job (CLI) in the Amazon Simple Storage Service User Guide.

\n \n

Before you create a bulk import job, you must enable IoT SiteWise warm tier or IoT SiteWise cold tier.\n For more information about how to configure storage settings, see PutStorageConfiguration.

\n

Bulk import is designed to store historical data to IoT SiteWise. It does not trigger computations or notifications on \n IoT SiteWise warm or cold tier storage.

\n
", + "smithy.api#documentation": "

Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, see Create a\n bulk import job (CLI) in the Amazon Simple Storage Service User Guide.

\n \n

Before you create a bulk import job, you must enable IoT SiteWise warm tier or IoT SiteWise cold tier.\n For more information about how to configure storage settings, see PutStorageConfiguration.

\n

Bulk import is designed to store historical data to IoT SiteWise. It does not trigger\n computations or notifications on IoT SiteWise warm or cold tier storage.

\n
", "smithy.api#endpoint": { "hostPrefix": "data." }, @@ -5457,9 +5464,9 @@ "type": "structure", "members": { "gatewayName": { - "target": "com.amazonaws.iotsitewise#Name", + "target": "com.amazonaws.iotsitewise#GatewayName", "traits": { - "smithy.api#documentation": "

A unique, friendly name for the gateway.

", + "smithy.api#documentation": "

A unique name for the gateway.

", "smithy.api#required": {} } }, @@ -5864,7 +5871,7 @@ "scalarValue": { "target": "com.amazonaws.iotsitewise#ScalarValue", "traits": { - "smithy.api#documentation": "

Indicates if the data point is a scalar value such as integer, string, double, or Boolean.

" + "smithy.api#documentation": "

Indicates if the data point is a scalar value such as integer, string, double, or Boolean.\n

" } }, "arrayValue": { @@ -6067,7 +6074,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a composite model. This action can't be undone. You must delete all assets created\n from a composite model before you can delete the model. Also, you can't delete a composite model if\n a parent asset model exists that contains a property formula expression that depends on the\n asset model that you want to delete. For more information, see Deleting assets and models in the\n IoT SiteWise User Guide.

", + "smithy.api#documentation": "

Deletes a composite model. This action can't be undone. You must delete all assets created\n from a composite model before you can delete the model. Also, you can't delete a composite\n model if a parent asset model exists that contains a property formula expression that depends\n on the asset model that you want to delete. For more information, see Deleting assets and\n models in the IoT SiteWise User Guide.

", "smithy.api#endpoint": { "hostPrefix": "api." }, @@ -6279,6 +6286,9 @@ "target": "smithy.api#Unit" }, "errors": [ + { + "target": "com.amazonaws.iotsitewise#ConflictingOperationException" + }, { "target": "com.amazonaws.iotsitewise#InternalFailureException" }, @@ -6829,7 +6839,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information about an asset composite model (also known as an asset component). An AssetCompositeModel is an instance of an AssetModelCompositeModel. If you want to see information about the model this is based on, call \n DescribeAssetModelCompositeModel.

", + "smithy.api#documentation": "

Retrieves information about an asset composite model (also known as an asset component).\n An AssetCompositeModel is an instance of an\n AssetModelCompositeModel. If you want to see information about the model this is\n based on, call DescribeAssetModelCompositeModel.

", "smithy.api#endpoint": { "hostPrefix": "api." }, @@ -6884,7 +6894,7 @@ "assetCompositeModelExternalId": { "target": "com.amazonaws.iotsitewise#ExternalId", "traits": { - "smithy.api#documentation": "

An external ID to assign to the asset model.

\n

If the composite model is a component-based composite model, or one nested inside a component model, you can only set the external ID using \n UpdateAssetModelCompositeModel and specifying the derived ID of the model or property from the created model it's a part of.

" + "smithy.api#documentation": "

An external ID to assign to the asset model.

\n

If the composite model is a component-based composite model, or one nested inside a\n component model, you can only set the external ID using\n UpdateAssetModelCompositeModel and specifying the derived ID of the model or\n property from the created model it's a part of.

" } }, "assetCompositeModelPath": { @@ -6911,7 +6921,7 @@ "assetCompositeModelType": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "

The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY.

", + "smithy.api#documentation": "

The composite model type. Valid values are AWS/ALARM, CUSTOM, or\n AWS/L4E_ANOMALY.

", "smithy.api#required": {} } }, @@ -7035,7 +7045,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information about an asset model composite model (also known as an asset model component). For more information, see Custom composite models (Components) in the IoT SiteWise User Guide.

", + "smithy.api#documentation": "

Retrieves information about an asset model composite model (also known as an asset model\n component). For more information, see Custom composite models\n (Components) in the IoT SiteWise User Guide.

", "smithy.api#endpoint": { "hostPrefix": "api." }, @@ -7117,7 +7127,7 @@ "assetModelCompositeModelType": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "

The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY.

", + "smithy.api#documentation": "

The composite model type. Valid values are AWS/ALARM, CUSTOM, or\n AWS/L4E_ANOMALY.

", "smithy.api#required": {} } }, @@ -7131,7 +7141,7 @@ "compositionDetails": { "target": "com.amazonaws.iotsitewise#CompositionDetails", "traits": { - "smithy.api#documentation": "

Metadata for the composition relationship established by using composedAssetModelId in \n CreateAssetModelCompositeModel\n .\n For instance, an array detailing the path of the composition relationship for this composite model.

" + "smithy.api#documentation": "

Metadata for the composition relationship established by using\n composedAssetModelId in \n CreateAssetModelCompositeModel\n . For instance, an array detailing the\n path of the composition relationship for this composite model.

" } }, "assetModelCompositeModelSummaries": { @@ -7167,7 +7177,7 @@ "target": "com.amazonaws.iotsitewise#ExcludeProperties", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

\n Whether or not to exclude asset model properties from the response.\n

", + "smithy.api#documentation": "

Whether or not to exclude asset model properties from the response.

", "smithy.api#httpQuery": "excludeProperties" } } @@ -7224,7 +7234,7 @@ "assetModelCompositeModels": { "target": "com.amazonaws.iotsitewise#AssetModelCompositeModels", "traits": { - "smithy.api#documentation": "

The list of built-in composite models for the asset model, such as those with those of type AWS/ALARMS.

" + "smithy.api#documentation": "

The list of built-in composite models for the asset model, such as those with those of\n type AWS/ALARMS.

" } }, "assetModelCreationDate": { @@ -7257,7 +7267,7 @@ "assetModelCompositeModelSummaries": { "target": "com.amazonaws.iotsitewise#AssetModelCompositeModelSummaries", "traits": { - "smithy.api#documentation": "

The list of the immediate child custom composite model summaries for the asset model.

" + "smithy.api#documentation": "

The list of the immediate child custom composite model summaries for the asset\n model.

" } }, "assetModelExternalId": { @@ -7362,7 +7372,7 @@ "compositeModel": { "target": "com.amazonaws.iotsitewise#CompositeModelProperty", "traits": { - "smithy.api#documentation": "

The composite model that declares this asset property, if this asset property exists\n in a composite model.

" + "smithy.api#documentation": "

The composite model that declares this asset property, if this asset property exists in a\n composite model.

" } }, "assetExternalId": { @@ -7391,7 +7401,7 @@ "target": "com.amazonaws.iotsitewise#ExcludeProperties", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

\n Whether or not to exclude asset properties from the response.\n

", + "smithy.api#documentation": "

Whether or not to exclude asset properties from the response.

", "smithy.api#httpQuery": "excludeProperties" } } @@ -7962,7 +7972,7 @@ } }, "gatewayName": { - "target": "com.amazonaws.iotsitewise#Name", + "target": "com.amazonaws.iotsitewise#GatewayName", "traits": { "smithy.api#documentation": "

The name of the gateway.

", "smithy.api#required": {} @@ -8689,7 +8699,7 @@ "assetId": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "

The ID of the parent asset from which to disassociate the child asset. This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.

", + "smithy.api#documentation": "

The ID of the parent asset from which to disassociate the child asset.\n This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -8697,7 +8707,7 @@ "hierarchyId": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "

The ID of a hierarchy in the parent asset's model. (This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) Hierarchies allow different groupings\n of assets to be formed that all come from the same asset model. You can use the hierarchy ID\n to identify the correct asset to disassociate. For more information, see\n Asset hierarchies in the IoT SiteWise User Guide.

", + "smithy.api#documentation": "

The ID of a hierarchy in the parent asset's model. (This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) Hierarchies allow\n different groupings of assets to be formed that all come from the same asset model. You can\n use the hierarchy ID to identify the correct asset to disassociate. For more information, see\n Asset hierarchies in the IoT SiteWise User Guide.

", "smithy.api#required": {} } }, @@ -9051,7 +9061,7 @@ } ], "traits": { - "smithy.api#documentation": "

Run SQL queries to retrieve metadata and time-series data from asset models, assets, measurements, metrics, transforms, and aggregates.

", + "smithy.api#documentation": "

Run SQL queries to retrieve metadata and time-series data from asset models, assets,\n measurements, metrics, transforms, and aggregates.

", "smithy.api#endpoint": { "hostPrefix": "data." }, @@ -9299,6 +9309,16 @@ "smithy.api#documentation": "

Contains a summary of a gateway capability configuration.

" } }, + "com.amazonaws.iotsitewise#GatewayName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[^\\u0000-\\u001F\\u007F]+$" + } + }, "com.amazonaws.iotsitewise#GatewayPlatform": { "type": "structure", "members": { @@ -9313,6 +9333,12 @@ "traits": { "smithy.api#documentation": "

A gateway that runs on IoT Greengrass V2.

" } + }, + "siemensIE": { + "target": "com.amazonaws.iotsitewise#SiemensIE", + "traits": { + "smithy.api#documentation": "

A SiteWise Edge gateway that runs on a Siemens Industrial Edge Device.

" + } } }, "traits": { @@ -9336,9 +9362,9 @@ } }, "gatewayName": { - "target": "com.amazonaws.iotsitewise#Name", + "target": "com.amazonaws.iotsitewise#GatewayName", "traits": { - "smithy.api#documentation": "

The name of the asset.

", + "smithy.api#documentation": "

The name of the gateway.

", "smithy.api#required": {} } }, @@ -9493,7 +9519,7 @@ "maxResults": { "target": "com.amazonaws.iotsitewise#GetAssetPropertyValueAggregatesMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.

\n
    \n
  • \n

    The size of the result set is equal to 1 MB.

    \n
  • \n
  • \n

    The number of data points in the result set is equal to the value of\n maxResults. The maximum value of maxResults is 2500.

    \n
  • \n
", + "smithy.api#documentation": "

The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.

\n
    \n
  • \n

    The size of the result set is equal to 1 MB.

    \n
  • \n
  • \n

    The number of data points in the result set is equal to the value of\n maxResults. The maximum value of maxResults is 2500.

    \n
  • \n
", "smithy.api#httpQuery": "maxResults" } } @@ -9681,7 +9707,7 @@ "maxResults": { "target": "com.amazonaws.iotsitewise#GetAssetPropertyValueHistoryMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.

\n
    \n
  • \n

    The size of the result set is equal to 4 MB.

    \n
  • \n
  • \n

    The number of data points in the result set is equal to the value of\n maxResults. The maximum value of maxResults is 20000.

    \n
  • \n
", + "smithy.api#documentation": "

The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.

\n
    \n
  • \n

    The size of the result set is equal to 4 MB.

    \n
  • \n
  • \n

    The number of data points in the result set is equal to the value of\n maxResults. The maximum value of maxResults is 20000.

    \n
  • \n
", "smithy.api#httpQuery": "maxResults" } } @@ -9928,7 +9954,7 @@ "groupArn": { "target": "com.amazonaws.iotsitewise#ARN", "traits": { - "smithy.api#documentation": "

The ARN of the Greengrass group. For more information about how to find a group's\n ARN, see ListGroups and GetGroup in the\n IoT Greengrass API Reference.

", + "smithy.api#documentation": "

The ARN of the Greengrass group. For more information about how to find a group's\n ARN, see ListGroups and GetGroup in the IoT Greengrass V1\n API Reference.

", "smithy.api#required": {} } } @@ -9949,7 +9975,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains details for a gateway that runs on IoT Greengrass V2. To create a gateway that runs on IoT Greengrass\n V2, you must deploy the IoT SiteWise Edge component to your gateway device. Your Greengrass\n device role must use the AWSIoTSiteWiseEdgeAccess policy. For more\n information, see Using IoT SiteWise at the edge in the\n IoT SiteWise User Guide.

" + "smithy.api#documentation": "

Contains details for a gateway that runs on IoT Greengrass V2. To create a gateway that runs on IoT Greengrass V2,\n you must deploy the IoT SiteWise Edge component to your gateway device. Your Greengrass\n device role must use the AWSIoTSiteWiseEdgeAccess policy. For more\n information, see Using IoT SiteWise at the edge in the\n IoT SiteWise User Guide.

" } }, "com.amazonaws.iotsitewise#GroupIdentity": { @@ -10048,7 +10074,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains an identity that can access an IoT SiteWise Monitor resource.

\n \n

Currently, you can't use Amazon Web Services API operations to retrieve IAM Identity Center identity IDs. You can find the\n IAM Identity Center identity IDs in the URL of user and group pages in the IAM Identity Center console.

\n
" + "smithy.api#documentation": "

Contains an identity that can access an IoT SiteWise Monitor resource.

\n \n

Currently, you can't use Amazon Web Services API operations to retrieve IAM Identity Center identity IDs. You can\n find the IAM Identity Center identity IDs in the URL of user and group pages in the IAM Identity Center console.

\n
" } }, "com.amazonaws.iotsitewise#IdentityId": { @@ -10259,6 +10285,16 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.iotsitewise#IotCoreThingName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[a-zA-Z0-9:_-]+$" + } + }, "com.amazonaws.iotsitewise#JobConfiguration": { "type": "structure", "members": { @@ -10968,7 +11004,7 @@ "filter": { "target": "com.amazonaws.iotsitewise#ListAssetPropertiesFilter", "traits": { - "smithy.api#documentation": "

Filters the requested list of asset properties. You can choose one of the following\n options:

\n
    \n
  • \n

    \n ALL – The list includes all asset properties for a given asset\n model ID.

    \n
  • \n
  • \n

    \n BASE – The list includes only base asset properties for a given\n asset model ID.

    \n
  • \n
\n

Default: BASE\n

", + "smithy.api#documentation": "

Filters the requested list of asset properties. You can choose one of the following\n options:

\n
    \n
  • \n

    \n ALL – The list includes all asset properties for a given asset model ID.\n

    \n
  • \n
  • \n

    \n BASE – The list includes only base asset properties for a given asset\n model ID.

    \n
  • \n
\n

Default: BASE\n

", "smithy.api#httpQuery": "filter" } } @@ -11265,7 +11301,7 @@ "hierarchyId": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "

The ID of the hierarchy by which child assets are associated to the asset. (This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) To find a\n hierarchy ID, use the DescribeAsset or DescribeAssetModel operations. This\n parameter is required if you choose CHILD for\n traversalDirection.

\n

For more information, see Asset hierarchies in the IoT SiteWise User Guide.

", + "smithy.api#documentation": "

The ID of the hierarchy by which child assets are associated to the asset.\n (This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) To find a hierarchy ID, use the DescribeAsset or DescribeAssetModel operations. This parameter is required if you choose\n CHILD for traversalDirection.

\n

For more information, see Asset hierarchies in the IoT SiteWise User Guide.

", "smithy.api#httpQuery": "hierarchyId" } }, @@ -11476,7 +11512,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves a paginated list of composition relationships for an asset model of type COMPONENT_MODEL.

", + "smithy.api#documentation": "

Retrieves a paginated list of composition relationships for an asset model of type\n COMPONENT_MODEL.

", "smithy.api#endpoint": { "hostPrefix": "api." }, @@ -13450,6 +13486,21 @@ "smithy.api#httpError": 503 } }, + "com.amazonaws.iotsitewise#SiemensIE": { + "type": "structure", + "members": { + "iotCoreThingName": { + "target": "com.amazonaws.iotsitewise#IotCoreThingName", + "traits": { + "smithy.api#documentation": "

The name of the IoT Thing for your SiteWise Edge gateway.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details for a SiteWise Edge gateway that runs on a Siemens Industrial Edge Device.

" + } + }, "com.amazonaws.iotsitewise#StorageType": { "type": "enum", "members": { @@ -14135,7 +14186,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates an asset model and all of the assets that were created from the model. Each asset\n created from the model inherits the updated asset model's property and hierarchy definitions.\n For more information, see Updating assets and models in the\n IoT SiteWise User Guide.

\n \n

This operation overwrites the existing model with the provided model. To avoid deleting\n your asset model's properties or hierarchies, you must include their IDs and definitions in\n the updated asset model payload. For more information, see DescribeAssetModel.

\n

If you remove a property from an asset model, IoT SiteWise deletes all previous data for that\n property. If you remove a hierarchy definition from an asset model, IoT SiteWise disassociates every\n asset associated with that hierarchy. You can't change the type or data type of an existing\n property.

\n
", + "smithy.api#documentation": "

Updates an asset model and all of the assets that were created from the model. Each asset\n created from the model inherits the updated asset model's property and hierarchy definitions.\n For more information, see Updating assets and models in the\n IoT SiteWise User Guide.

\n \n

If you remove a property from an asset model, IoT SiteWise deletes all previous data for that\n property. You can’t change the type or data type of an existing property.

\n

To replace an existing asset model property with a new one with the same\n name, do the following:

\n
    \n
  1. \n

    Submit an UpdateAssetModel request with the entire existing property\n removed.

    \n
  2. \n
  3. \n

    Submit a second UpdateAssetModel request that includes the new\n property. The new asset property will have the same name as the previous\n one and IoT SiteWise will generate a new unique id.

    \n
  4. \n
\n
", "smithy.api#endpoint": { "hostPrefix": "api." }, @@ -14178,7 +14229,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a composite model and all of the assets that were created from the model. Each asset\n created from the model inherits the updated asset model's property and hierarchy definitions.\n For more information, see Updating assets and models in the\n IoT SiteWise User Guide.

\n \n

If you remove a property from a composite asset model, IoT SiteWise deletes all previous data for that property. You can’t change the type or data type of an existing property.

\n

To replace an existing composite asset model property with a new one with the same name, do the following:

\n
    \n
  1. \n

    Submit an UpdateAssetModelCompositeModel request with the entire existing property removed.

    \n
  2. \n
  3. \n

    Submit a second UpdateAssetModelCompositeModel request that includes the new property. The new asset property will have the same\n name as the previous one and IoT SiteWise will generate a new unique id.

    \n
  4. \n
\n
", + "smithy.api#documentation": "

Updates a composite model and all of the assets that were created from the model. Each\n asset created from the model inherits the updated asset model's property and hierarchy\n definitions. For more information, see Updating assets and models in the\n IoT SiteWise User Guide.

\n \n

If you remove a property from a composite asset model, IoT SiteWise deletes all previous data\n for that property. You can’t change the type or data type of an existing property.

\n

To replace an existing composite asset model property with a new one with the same\n name, do the following:

\n
    \n
  1. \n

    Submit an UpdateAssetModelCompositeModel request with the entire\n existing property removed.

    \n
  2. \n
  3. \n

    Submit a second UpdateAssetModelCompositeModel request that includes\n the new property. The new asset property will have the same name as the\n previous one and IoT SiteWise will generate a new unique id.

    \n
  4. \n
\n
", "smithy.api#endpoint": { "hostPrefix": "api." }, @@ -14211,7 +14262,7 @@ "assetModelCompositeModelExternalId": { "target": "com.amazonaws.iotsitewise#ExternalId", "traits": { - "smithy.api#documentation": "

An external ID to assign to the asset model. You can only set the external ID of the asset model if it wasn't set when it was created, or you're setting it to \n the exact same thing as when it was created.

" + "smithy.api#documentation": "

An external ID to assign to the asset model. You can only set the external ID of the asset\n model if it wasn't set when it was created, or you're setting it to the exact same thing as\n when it was created.

" } }, "assetModelCompositeModelDescription": { @@ -14223,7 +14274,7 @@ "assetModelCompositeModelName": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "

A unique, friendly name for the composite model.

", + "smithy.api#documentation": "

A unique name for the composite model.

", "smithy.api#required": {} } }, @@ -14237,7 +14288,7 @@ "assetModelCompositeModelProperties": { "target": "com.amazonaws.iotsitewise#AssetModelProperties", "traits": { - "smithy.api#documentation": "

The property definitions of the composite model. For more information, see .

\n

You can specify up to 200 properties per composite model. For more\n information, see Quotas in the IoT SiteWise User Guide.

" + "smithy.api#documentation": "

The property definitions of the composite model. For more information, see \n Inline custom composite models in the IoT SiteWise User Guide.

\n

You can specify up to 200 properties per composite model. For more\n information, see Quotas in the IoT SiteWise User Guide.

" } } }, @@ -14280,7 +14331,7 @@ "assetModelName": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "

A unique, friendly name for the asset model.

", + "smithy.api#documentation": "

A unique name for the asset model.

", "smithy.api#required": {} } }, @@ -14305,7 +14356,7 @@ "assetModelCompositeModels": { "target": "com.amazonaws.iotsitewise#AssetModelCompositeModels", "traits": { - "smithy.api#documentation": "

The composite models that are part of this asset model. It groups properties\n (such as attributes, measurements, transforms, and metrics) and child composite models that\n model parts of your industrial equipment. Each composite model has a type that defines the\n properties that the composite model supports. Use composite models to define alarms on this asset model.

\n \n

When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information,\n see .

\n
" + "smithy.api#documentation": "

The composite models that are part of this asset model. It groups properties\n (such as attributes, measurements, transforms, and metrics) and child composite models that\n model parts of your industrial equipment. Each composite model has a type that defines the\n properties that the composite model supports. Use composite models to define alarms on this asset model.

\n \n

When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information,\n see Creating custom composite models (Components) in the\n IoT SiteWise User Guide.

\n
" } }, "clientToken": { @@ -14318,7 +14369,7 @@ "assetModelExternalId": { "target": "com.amazonaws.iotsitewise#ExternalId", "traits": { - "smithy.api#documentation": "

An external ID to assign to the asset model. The asset model must not already have an external ID. The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide.

" + "smithy.api#documentation": "

An external ID to assign to the asset model. The asset model must not already have an\n external ID. The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide.

" } } }, @@ -14461,7 +14512,7 @@ "assetExternalId": { "target": "com.amazonaws.iotsitewise#ExternalId", "traits": { - "smithy.api#documentation": "

An external ID to assign to the asset. The asset must not already have an external ID. The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide.

" + "smithy.api#documentation": "

An external ID to assign to the asset. The asset must not already have an external ID.\n The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide.

" } } }, @@ -14709,9 +14760,9 @@ } }, "gatewayName": { - "target": "com.amazonaws.iotsitewise#Name", + "target": "com.amazonaws.iotsitewise#GatewayName", "traits": { - "smithy.api#documentation": "

A unique, friendly name for the gateway.

", + "smithy.api#documentation": "

A unique name for the gateway.

", "smithy.api#required": {} } } @@ -14968,13 +15019,13 @@ "propertyId": { "target": "com.amazonaws.iotsitewise#Macro", "traits": { - "smithy.api#documentation": "

The ID of the property to use as the variable. You can use the property name\n if it's from the same asset model. If the property has an external ID, you can specify\n externalId: followed by the external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.

" + "smithy.api#documentation": "

The ID of the property to use as the variable. You can use the property name\n if it's from the same asset model. If the property has an external ID, you can specify\n externalId: followed by the external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.

" } }, "hierarchyId": { "target": "com.amazonaws.iotsitewise#Macro", "traits": { - "smithy.api#documentation": "

The ID of the hierarchy to query for the property ID. You can use the hierarchy's name\n instead of the hierarchy's ID. If the hierarchy has an external ID, you can specify\n externalId: followed by the external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.

\n

You use a hierarchy ID instead of a model ID because you can have several hierarchies\n using the same model and therefore the same propertyId. For example, you might\n have separately grouped assets that come from the same asset model. For more information, see\n Asset hierarchies in the IoT SiteWise User Guide.

" + "smithy.api#documentation": "

The ID of the hierarchy to query for the property ID. You can use the hierarchy's name\n instead of the hierarchy's ID. If the hierarchy has an external ID, you can specify\n externalId: followed by the external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.

\n

You use a hierarchy ID instead of a model ID because you can have several hierarchies\n using the same model and therefore the same propertyId. For example, you might\n have separately grouped assets that come from the same asset model. For more information, see\n Asset hierarchies in the IoT SiteWise User Guide.

" } }, "propertyPath": { @@ -15000,7 +15051,7 @@ "integerValue": { "target": "com.amazonaws.iotsitewise#PropertyValueIntegerValue", "traits": { - "smithy.api#documentation": "

Asset property data of type integer (number that's greater than or equal to zero).

" + "smithy.api#documentation": "

Asset property data of type integer (whole number).

" } }, "doubleValue": { diff --git a/models/ivs-realtime.json b/models/ivs-realtime.json index 234a738b0e..f451a1b089 100644 --- a/models/ivs-realtime.json +++ b/models/ivs-realtime.json @@ -120,7 +120,7 @@ "sdkId": "IVS RealTime", "arnNamespace": "ivs", "cloudFormationName": "IVS", - "cloudTrailEventSource": "REPLACE_WITH_EVENT_SOURCE", + "cloudTrailEventSource": "ivs.amazonaws.com", "endpointPrefix": "ivsrealtime" }, "aws.auth#sigv4": { diff --git a/models/ivs.json b/models/ivs.json index 3f00894941..944a77b51a 100644 --- a/models/ivs.json +++ b/models/ivs.json @@ -132,7 +132,7 @@ "sdkId": "ivs", "arnNamespace": "ivs", "cloudFormationName": "IVS", - "cloudTrailEventSource": "REPLACE_WITH_EVENT_SOURCE" + "cloudTrailEventSource": "ivs.amazonaws.com" }, "aws.auth#sigv4": { "name": "ivs" @@ -157,7 +157,7 @@ "date" ] }, - "smithy.api#documentation": "

\n Introduction\n

\n

The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP\n API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both\n requests and responses, including errors.

\n

The API is an Amazon Web Services regional service. For a list of supported regions and\n Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the\n Amazon Web Services General Reference.

\n

\n \n All API request parameters and URLs are case sensitive.\n \n \n

\n

For a summary of notable documentation changes in each release, see Document\n History.

\n

\n Allowed Header Values\n

\n
    \n
  • \n

    \n \n Accept:\n application/json

    \n
  • \n
  • \n

    \n \n Accept-Encoding:\n gzip, deflate

    \n
  • \n
  • \n

    \n \n Content-Type:\n application/json

    \n
  • \n
\n

\n Key Concepts\n

\n
    \n
  • \n

    \n Channel — Stores configuration data related to your live stream. You first create a channel and then use the channel’s stream key to start your live stream.

    \n
  • \n
  • \n

    \n Stream key — An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. \n Treat the stream key like a secret, since it allows anyone to stream to the channel.\n \n

    \n
  • \n
  • \n

    \n Playback key pair — Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token.

    \n
  • \n
  • \n

    \n Recording configuration — Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration.

    \n
  • \n
  • \n

    \n Playback restriction policy — Restricts playback by countries and/or origin sites.

    \n
  • \n
\n

For more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming.

\n

\n Tagging\n

\n

A tag is a metadata label that you assign to an Amazon Web Services\n resource. A tag comprises a key and a value, both\n set by you. For example, you might set a tag as topic:nature to label a\n particular video category. See Tagging Amazon Web Services Resources for\n more information, including restrictions that apply to tags and \"Tag naming limits and\n requirements\"; Amazon IVS has no service-specific constraints beyond what is documented\n there.

\n

Tags can help you identify and organize your Amazon Web Services resources. For example,\n you can use the same tag for different resources to indicate that they are related. You can\n also use tags to manage access (see Access Tags).

\n

The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following\n resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording\n Configurations.

\n

At most 50 tags can be applied to a resource.

\n

\n Authentication versus Authorization\n

\n

Note the differences between these concepts:

\n
    \n
  • \n

    \n Authentication is about verifying identity. You need to be\n authenticated to sign Amazon IVS API requests.

    \n
  • \n
  • \n

    \n Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition,\n authorization is needed to view Amazon IVS private channels.\n (Private channels are channels that are enabled for \"playback authorization.\")

    \n
  • \n
\n

\n Authentication\n

\n

All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services\n Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying\n API calls for you. However, if your application calls the Amazon IVS API directly, it’s your\n responsibility to sign the requests.

\n

You generate a signature using valid Amazon Web Services credentials that have permission\n to perform the requested action. For example, you must sign PutMetadata requests with a\n signature generated from a user account that has the ivs:PutMetadata\n permission.

\n

For more information:

\n \n

\n Amazon Resource Names (ARNs)\n

\n

ARNs uniquely identify AWS resources. An ARN is required when you need to specify a\n resource unambiguously across all of AWS, such as in IAM policies and API\n calls. For more information, see Amazon\n Resource Names in the AWS General Reference.

\n

\n Channel Endpoints\n

\n
    \n
  • \n

    \n CreateChannel — Creates a new channel and an associated stream\n key to start streaming.

    \n
  • \n
  • \n

    \n GetChannel — Gets the channel configuration for the specified\n channel ARN.

    \n
  • \n
  • \n

    \n BatchGetChannel — Performs GetChannel on\n multiple ARNs simultaneously.

    \n
  • \n
  • \n

    \n ListChannels — Gets summary information about all channels in\n your account, in the Amazon Web Services region where the API request is processed. This\n list can be filtered to match a specified name or recording-configuration ARN. Filters are\n mutually exclusive and cannot be used together. If you try to use both filters, you will\n get an error (409 Conflict Exception).

    \n
  • \n
  • \n

    \n UpdateChannel — Updates a channel's configuration. This does\n not affect an ongoing stream of this channel. You must stop and restart the stream for the\n changes to take effect.

    \n
  • \n
  • \n

    \n DeleteChannel — Deletes the specified channel.

    \n
  • \n
\n

\n Playback Restriction Policy Endpoints\n

\n \n

\n Private Channel Endpoints\n

\n

For more information, see Setting Up Private Channels in the\n Amazon IVS User Guide.

\n
    \n
  • \n

    \n ImportPlaybackKeyPair — Imports the public portion of a new\n key pair and returns its arn and fingerprint. The\n privateKey can then be used to generate viewer authorization tokens, to\n grant viewers access to private channels (channels enabled for playback\n authorization).

    \n
  • \n
  • \n

    \n GetPlaybackKeyPair — Gets a specified playback authorization\n key pair and returns the arn and fingerprint. The\n privateKey held by the caller can be used to generate viewer authorization\n tokens, to grant viewers access to private channels.

    \n
  • \n
  • \n

    \n ListPlaybackKeyPairs — Gets summary information about playback\n key pairs.

    \n
  • \n
  • \n

    \n DeletePlaybackKeyPair — Deletes a specified authorization key\n pair. This invalidates future viewer tokens generated using the key pair’s\n privateKey.

    \n
  • \n
  • \n

    \n StartViewerSessionRevocation — Starts the process of revoking\n the viewer session associated with a specified channel ARN and viewer ID. Optionally, you\n can provide a version to revoke viewer sessions less than and including that\n version.

    \n
  • \n
  • \n

    \n BatchStartViewerSessionRevocation — Performs StartViewerSessionRevocation on multiple channel ARN and viewer ID pairs\n simultaneously.

    \n
  • \n
\n

\n Recording Configuration Endpoints\n

\n \n

\n Stream Endpoints\n

\n
    \n
  • \n

    \n GetStream — Gets information about the active (live) stream on\n a specified channel.

    \n
  • \n
  • \n

    \n GetStreamSession — Gets metadata on a specified stream.

    \n
  • \n
  • \n

    \n ListStreams — Gets summary information about live streams in\n your account, in the Amazon Web Services region where the API request is processed.

    \n
  • \n
  • \n

    \n ListStreamSessions — Gets a summary of current and previous\n streams for a specified channel in your account, in the AWS region where the API request\n is processed.

    \n
  • \n
  • \n

    \n StopStream — Disconnects the incoming RTMPS stream for the\n specified channel. Can be used in conjunction with DeleteStreamKey to\n prevent further streaming to a channel.

    \n
  • \n
  • \n

    \n PutMetadata — Inserts metadata into the active stream of the\n specified channel. At most 5 requests per second per channel are allowed, each with a\n maximum 1 KB payload. (If 5 TPS is not sufficient for your needs, we recommend batching\n your data into a single PutMetadata call.) At most 155 requests per second per account are\n allowed.

    \n
  • \n
\n

\n Stream Key Endpoints\n

\n
    \n
  • \n

    \n CreateStreamKey — Creates a stream key, used to initiate a\n stream, for the specified channel ARN.

    \n
  • \n
  • \n

    \n GetStreamKey — Gets stream key information for the specified\n ARN.

    \n
  • \n
  • \n

    \n BatchGetStreamKey — Performs GetStreamKey on\n multiple ARNs simultaneously.

    \n
  • \n
  • \n

    \n ListStreamKeys — Gets summary information about stream keys\n for the specified channel.

    \n
  • \n
  • \n

    \n DeleteStreamKey — Deletes the stream key for the specified\n ARN, so it can no longer be used to stream.

    \n
  • \n
\n

\n Amazon Web Services Tags Endpoints\n

\n
    \n
  • \n

    \n TagResource — Adds or updates tags for the Amazon Web Services\n resource with the specified ARN.

    \n
  • \n
  • \n

    \n UntagResource — Removes tags from the resource with the\n specified ARN.

    \n
  • \n
  • \n

    \n ListTagsForResource — Gets information about Amazon Web Services tags for the specified ARN.

    \n
  • \n
", + "smithy.api#documentation": "

\n Introduction\n

\n

The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP\n API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both\n requests and responses, including errors.

\n

The API is an Amazon Web Services regional service. For a list of supported regions and\n Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the\n Amazon Web Services General Reference.

\n

\n \n All API request parameters and URLs are case sensitive.\n \n \n

\n

For a summary of notable documentation changes in each release, see Document\n History.

\n

\n Allowed Header Values\n

\n
    \n
  • \n

    \n \n Accept:\n application/json

    \n
  • \n
  • \n

    \n \n Accept-Encoding:\n gzip, deflate

    \n
  • \n
  • \n

    \n \n Content-Type:\n application/json

    \n
  • \n
\n

\n Key Concepts\n

\n
    \n
  • \n

    \n Channel — Stores configuration data related to your live stream. You first create a channel and then use the channel’s stream key to start your live stream.

    \n
  • \n
  • \n

    \n Stream key — An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. \n Treat the stream key like a secret, since it allows anyone to stream to the channel.\n \n

    \n
  • \n
  • \n

    \n Playback key pair — Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token.

    \n
  • \n
  • \n

    \n Recording configuration — Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration.

    \n
  • \n
  • \n

    \n Playback restriction policy — Restricts playback by countries and/or origin sites.

    \n
  • \n
\n

For more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming.

\n

\n Tagging\n

\n

A tag is a metadata label that you assign to an Amazon Web Services\n resource. A tag comprises a key and a value, both\n set by you. For example, you might set a tag as topic:nature to label a\n particular video category. See Tagging Amazon Web Services Resources for\n more information, including restrictions that apply to tags and \"Tag naming limits and\n requirements\"; Amazon IVS has no service-specific constraints beyond what is documented\n there.

\n

Tags can help you identify and organize your Amazon Web Services resources. For example,\n you can use the same tag for different resources to indicate that they are related. You can\n also use tags to manage access (see Access Tags).

\n

The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following\n resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording\n Configurations.

\n

At most 50 tags can be applied to a resource.

\n

\n Authentication versus Authorization\n

\n

Note the differences between these concepts:

\n
    \n
  • \n

    \n Authentication is about verifying identity. You need to be\n authenticated to sign Amazon IVS API requests.

    \n
  • \n
  • \n

    \n Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition,\n authorization is needed to view Amazon IVS private channels.\n (Private channels are channels that are enabled for \"playback authorization.\")

    \n
  • \n
\n

\n Authentication\n

\n

All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services\n Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying\n API calls for you. However, if your application calls the Amazon IVS API directly, it’s your\n responsibility to sign the requests.

\n

You generate a signature using valid Amazon Web Services credentials that have permission\n to perform the requested action. For example, you must sign PutMetadata requests with a\n signature generated from a user account that has the ivs:PutMetadata\n permission.

\n

For more information:

\n \n

\n Amazon Resource Names (ARNs)\n

\n

ARNs uniquely identify AWS resources. An ARN is required when you need to specify a\n resource unambiguously across all of AWS, such as in IAM policies and API\n calls. For more information, see Amazon\n Resource Names in the AWS General Reference.

", "smithy.api#title": "Amazon Interactive Video Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1331,7 +1331,7 @@ "min": 0, "max": 128 }, - "smithy.api#pattern": "^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$" + "smithy.api#pattern": "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$$" } }, "com.amazonaws.ivs#ChannelRecordingConfigurationArn": { @@ -1341,7 +1341,7 @@ "min": 0, "max": 128 }, - "smithy.api#pattern": "^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+$" + "smithy.api#pattern": "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+$$" } }, "com.amazonaws.ivs#ChannelSummary": { @@ -3701,26 +3701,32 @@ } }, "com.amazonaws.ivs#RenditionConfigurationRendition": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "FULL_HD", - "name": "FULL_HD" - }, - { - "value": "HD", - "name": "HD" - }, - { - "value": "SD", - "name": "SD" - }, - { - "value": "LOWEST_RESOLUTION", - "name": "LOWEST_RESOLUTION" + "type": "enum", + "members": { + "SD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SD" } - ] + }, + "HD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HD" + } + }, + "FULL_HD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FULL_HD" + } + }, + "LOWEST_RESOLUTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LOWEST_RESOLUTION" + } + } } }, "com.amazonaws.ivs#RenditionConfigurationRenditionList": { @@ -4537,26 +4543,32 @@ } }, "com.amazonaws.ivs#ThumbnailConfigurationResolution": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "FULL_HD", - "name": "FULL_HD" - }, - { - "value": "HD", - "name": "HD" - }, - { - "value": "SD", - "name": "SD" - }, - { - "value": "LOWEST_RESOLUTION", - "name": "LOWEST_RESOLUTION" + "type": "enum", + "members": { + "SD": { + "target": "smithy.api#Unit", + "traits": 
{ + "smithy.api#enumValue": "SD" } - ] + }, + "HD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HD" + } + }, + "FULL_HD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FULL_HD" + } + }, + "LOWEST_RESOLUTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LOWEST_RESOLUTION" + } + } } }, "com.amazonaws.ivs#ThumbnailConfigurationStorage": { diff --git a/models/ivschat.json b/models/ivschat.json index 38a532a2f3..584fd209f7 100644 --- a/models/ivschat.json +++ b/models/ivschat.json @@ -78,7 +78,7 @@ "sdkId": "ivschat", "arnNamespace": "ivschat", "cloudFormationName": "IVSChat", - "cloudTrailEventSource": "REPLACE_WITH_EVENT_SOURCE" + "cloudTrailEventSource": "ivschat.amazonaws.com" }, "aws.auth#sigv4": { "name": "ivschat" diff --git a/models/kinesis-video-webrtc-storage.json b/models/kinesis-video-webrtc-storage.json index f1080dd13c..7f9e198c02 100644 --- a/models/kinesis-video-webrtc-storage.json +++ b/models/kinesis-video-webrtc-storage.json @@ -7,6 +7,9 @@ "operations": [ { "target": "com.amazonaws.kinesisvideowebrtcstorage#JoinStorageSession" + }, + { + "target": "com.amazonaws.kinesisvideowebrtcstorage#JoinStorageSessionAsViewer" } ], "traits": { @@ -31,7 +34,7 @@ "X-Amz-User-Agent" ] }, - "smithy.api#documentation": "

\n

", + "smithy.api#documentation": "webrtc\n

\n

", "smithy.api#title": "Amazon Kinesis Video WebRTC Storage", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -75,7 +78,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -94,7 +96,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -122,13 +123,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -141,7 +143,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -155,7 +156,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -178,7 +178,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -213,11 +212,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -228,16 +225,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -251,14 +251,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -267,15 +265,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -286,16 +283,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -309,7 +309,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -329,11 +328,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -344,20 +341,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": 
"DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -368,18 +367,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] }, @@ -718,6 +721,16 @@ "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*):kinesisvideo:[a-z0-9-]+:[0-9]+:[a-z]+/[a-zA-Z0-9_.-]+/[0-9]+$" } }, + "com.amazonaws.kinesisvideowebrtcstorage#ClientId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9_.-]+$" + } + }, "com.amazonaws.kinesisvideowebrtcstorage#ClientLimitExceededException": { "type": "structure", "members": { @@ -767,7 +780,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n Join the ongoing one way-video and/or multi-way audio WebRTC session as \n a video producing device for an input channel. If there’s no existing \n session for the channel, a new streaming session needs to be created, and the\n Amazon Resource Name (ARN) of the signaling channel must be provided.\n

\n

Currently for the SINGLE_MASTER type, a video producing\n device is able to ingest both audio and video media into a stream,\n while viewers can only ingest audio. Both a video producing device \n and viewers can join the session first, and wait for other participants.

\n

While participants are having peer to peer conversations through webRTC, \n the ingested media session will be stored into the Kinesis Video Stream.\n Multiple viewers are able to playback real-time media.

\n

Customers can also use existing Kinesis Video Streams features like \n HLS or DASH playback, Image generation, and more\n with ingested WebRTC media.

\n \n

Assume that only one video producing device client\n can be associated with a session for the channel. If more than one \n client joins the session of a specific channel as a video producing device,\n the most recent client request takes precedence.

\n
", + "smithy.api#documentation": "\n

Before using this API, you must call the GetSignalingChannelEndpoint API to request the WEBRTC endpoint. You then specify the endpoint and region in your JoinStorageSession API request.

\n
\n

Join the ongoing one way-video and/or multi-way audio WebRTC session as a video producing\n device for an input channel. If there’s no existing session for the channel, a new streaming\n session needs to be created, and the Amazon Resource Name (ARN) of the signaling channel must\n be provided.

\n

Currently for the SINGLE_MASTER type, a video producing\n device is able to ingest both audio and video media into a stream. Only video producing devices can join the session and record media.

\n \n

Both audio and video tracks are currently required for WebRTC ingestion.

\n

Current requirements:

\n
    \n
  • \n

    Video track: H.264

    \n
  • \n
  • \n

    Audio track: Opus

    \n
  • \n
\n
\n

The resulting ingested video in the Kinesis video stream will have the following\n parameters: H.264 video and AAC audio.

\n

Once a master participant has negotiated a connection through WebRTC, the ingested media\n session will be stored in the Kinesis video stream. Multiple viewers are then able to play\n back real-time media through our Playback APIs.

\n

You can also use existing Kinesis Video Streams features like HLS or\n DASH playback, image generation via GetImages, and more\n with ingested WebRTC media.

\n \n

S3 image delivery and notifications are not currently supported.

\n
\n \n

Assume that only one video producing device client\n can be associated with a session for the channel. If more than one \n client joins the session of a specific channel as a video producing device,\n the most recent client request takes precedence.

\n
\n

\n Additional information\n

\n
    \n
  • \n

    \n Idempotent - This API is not idempotent.

    \n
  • \n
  • \n

    \n Retry behavior - This is counted as a new API call.

    \n
  • \n
  • \n

    \n Concurrent calls - Concurrent calls are allowed. An offer is sent once per each call.

    \n
  • \n
", "smithy.api#http": { "code": 200, "method": "POST", @@ -775,6 +788,59 @@ } } }, + "com.amazonaws.kinesisvideowebrtcstorage#JoinStorageSessionAsViewer": { + "type": "operation", + "input": { + "target": "com.amazonaws.kinesisvideowebrtcstorage#JoinStorageSessionAsViewerInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.kinesisvideowebrtcstorage#AccessDeniedException" + }, + { + "target": "com.amazonaws.kinesisvideowebrtcstorage#ClientLimitExceededException" + }, + { + "target": "com.amazonaws.kinesisvideowebrtcstorage#InvalidArgumentException" + }, + { + "target": "com.amazonaws.kinesisvideowebrtcstorage#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

\n Join the ongoing one way-video and/or multi-way audio WebRTC session as \n a viewer for an input channel. If there’s\n no existing session for the channel, create a new streaming session and provide\n the Amazon Resource Name (ARN) of the signaling channel (channelArn)\n and client id (clientId).\n

\n

Currently for SINGLE_MASTER type, a video producing device\n is able to ingest both audio and video media into a stream, while viewers\n can only ingest audio. Both a video producing device and viewers can join\n a session first and wait for other participants. While participants are having peer to peer conversations through WebRTC,\n the ingested media session will be stored into the Kinesis Video Stream.\n Multiple viewers are able to playback real-time media.\n

\n

Customers can also use existing Kinesis Video Streams features like\n HLS or DASH playback, Image generation, and more \n with ingested WebRTC media. If there’s an existing session with the same\n clientId that's found in the join session request, the new request takes precedence.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/joinStorageSessionAsViewer" + } + } + }, + "com.amazonaws.kinesisvideowebrtcstorage#JoinStorageSessionAsViewerInput": { + "type": "structure", + "members": { + "channelArn": { + "target": "com.amazonaws.kinesisvideowebrtcstorage#ChannelArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the signaling channel.\n

", + "smithy.api#required": {} + } + }, + "clientId": { + "target": "com.amazonaws.kinesisvideowebrtcstorage#ClientId", + "traits": { + "smithy.api#documentation": "

\n The unique identifier for the sender client.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.kinesisvideowebrtcstorage#JoinStorageSessionInput": { "type": "structure", "members": { diff --git a/models/kinesis.json b/models/kinesis.json index f03db12797..4176b38755 100644 --- a/models/kinesis.json +++ b/models/kinesis.json @@ -670,6 +670,21 @@ "value": "control" } }, + "smithy.test#smokeTests": [ + { + "id": "DescribeStreamFailure", + "params": { + "StreamName": "bogus-stream-name" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ], "smithy.waiters#waitable": { "StreamExists": { "acceptors": [ @@ -6069,7 +6084,20 @@ "inputToken": "NextToken", "outputToken": "NextToken", "pageSize": "Limit" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListStreamsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.kinesis#ListStreamsInput": { diff --git a/models/lex-models-v2.json b/models/lex-models-v2.json index 4bb6405418..12c84bb0a1 100644 --- a/models/lex-models-v2.json +++ b/models/lex-models-v2.json @@ -2629,6 +2629,44 @@ "smithy.api#output": {} } }, + "com.amazonaws.lexmodelsv2#BedrockGuardrailConfiguration": { + "type": "structure", + "members": { + "identifier": { + "target": "com.amazonaws.lexmodelsv2#BedrockGuardrailIdentifier", + "traits": { + "smithy.api#documentation": "

The unique guardrail id for the Bedrock guardrail configuration.

", + "smithy.api#required": {} + } + }, + "version": { + "target": "com.amazonaws.lexmodelsv2#BedrockGuardrailVersion", + "traits": { + "smithy.api#documentation": "

The guardrail version for the Bedrock guardrail configuration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details on the Bedrock guardrail configuration.

" + } + }, + "com.amazonaws.lexmodelsv2#BedrockGuardrailIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$" + } + }, + "com.amazonaws.lexmodelsv2#BedrockGuardrailVersion": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(([1-9][0-9]{0,7})|(DRAFT))$" + } + }, "com.amazonaws.lexmodelsv2#BedrockKnowledgeBaseArn": { "type": "string", "traits": { @@ -2645,21 +2683,57 @@ "bedrockKnowledgeBaseArn": { "target": "com.amazonaws.lexmodelsv2#BedrockKnowledgeBaseArn", "traits": { - "smithy.api#documentation": "

The ARN of the knowledge base used.

", + "smithy.api#documentation": "

The base ARN of the knowledge base used.

", "smithy.api#required": {} } + }, + "exactResponse": { + "target": "com.amazonaws.lexmodelsv2#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Specifies whether to return an exact response, or to return an answer generated by the model, using the fields you specify from the database.

" + } + }, + "exactResponseFields": { + "target": "com.amazonaws.lexmodelsv2#BedrockKnowledgeStoreExactResponseFields", + "traits": { + "smithy.api#documentation": "

Contains the names of the fields used for an exact response to the user.

" + } } }, "traits": { "smithy.api#documentation": "

Contains details about the configuration of a Amazon Bedrock knowledge base.

" } }, + "com.amazonaws.lexmodelsv2#BedrockKnowledgeStoreExactResponseFields": { + "type": "structure", + "members": { + "answerField": { + "target": "com.amazonaws.lexmodelsv2#AnswerField", + "traits": { + "smithy.api#documentation": "

The answer field used for an exact response from Bedrock Knowledge Store.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The exact response fields given by the Bedrock knowledge store.

" + } + }, "com.amazonaws.lexmodelsv2#BedrockModelArn": { "type": "string", "traits": { "smithy.api#pattern": "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model\\/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}$" } }, + "com.amazonaws.lexmodelsv2#BedrockModelCustomPrompt": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4000 + } + } + }, "com.amazonaws.lexmodelsv2#BedrockModelSpecification": { "type": "structure", "members": { @@ -2669,12 +2743,47 @@ "smithy.api#documentation": "

The ARN of the foundation model used in descriptive bot building.

", "smithy.api#required": {} } + }, + "guardrail": { + "target": "com.amazonaws.lexmodelsv2#BedrockGuardrailConfiguration", + "traits": { + "smithy.api#documentation": "

The guardrail configuration in the Bedrock model specification details.

" + } + }, + "traceStatus": { + "target": "com.amazonaws.lexmodelsv2#BedrockTraceStatus", + "traits": { + "smithy.api#documentation": "

The Bedrock trace status in the Bedrock model specification details.

" + } + }, + "customPrompt": { + "target": "com.amazonaws.lexmodelsv2#BedrockModelCustomPrompt", + "traits": { + "smithy.api#documentation": "

The custom prompt used in the Bedrock model specification details.

" + } } }, "traits": { "smithy.api#documentation": "

Contains information about the Amazon Bedrock model used to interpret the prompt used in descriptive bot building.

" } }, + "com.amazonaws.lexmodelsv2#BedrockTraceStatus": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.lexmodelsv2#Boolean": { "type": "boolean", "traits": { @@ -6154,7 +6263,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds a new resource policy statement to a bot or bot alias. If a\n resource policy exists, the statement is added to the current resource\n policy. If a policy doesn't exist, a new policy is created.

\n

You can't create a resource policy statement that allows\n cross-account access.

", + "smithy.api#documentation": "

Adds a new resource policy statement to a bot or bot alias. If a\n resource policy exists, the statement is added to the current resource\n policy. If a policy doesn't exist, a new policy is created.

\n

You can't create a resource policy statement that allows\n cross-account access.

\n

You need to add the CreateResourcePolicy or UpdateResourcePolicy \n action to the bot role in order to call the API.

", "smithy.api#http": { "method": "POST", "uri": "/policy/{resourceArn}/statements", @@ -7972,7 +8081,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a policy statement from a resource policy. If you delete the\n last statement from a policy, the policy is deleted. If you specify a\n statement ID that doesn't exist in the policy, or if the bot or bot\n alias doesn't have a policy attached, Amazon Lex returns an\n exception.

", + "smithy.api#documentation": "

Deletes a policy statement from a resource policy. If you delete the\n last statement from a policy, the policy is deleted. If you specify a\n statement ID that doesn't exist in the policy, or if the bot or bot\n alias doesn't have a policy attached, Amazon Lex returns an\n exception.

\n

You need to add the DeleteResourcePolicy or UpdateResourcePolicy \n action to the bot role in order to call the API.

", "smithy.api#http": { "method": "DELETE", "uri": "/policy/{resourceArn}/statements/{statementId}", diff --git a/models/medialive.json b/models/medialive.json index 2a9c638ff4..68272e15e5 100644 --- a/models/medialive.json +++ b/models/medialive.json @@ -22357,6 +22357,18 @@ "smithy.api#documentation": "Multiplex Output Settings" } }, + "com.amazonaws.medialive#MultiplexPacketIdentifiersMapping": { + "type": "map", + "key": { + "target": "com.amazonaws.medialive#__string" + }, + "value": { + "target": "com.amazonaws.medialive#MultiplexProgramPacketIdentifiersMap" + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for MultiplexPacketIdentifiersMapping" + } + }, "com.amazonaws.medialive#MultiplexProgram": { "type": "structure", "members": { @@ -22502,6 +22514,30 @@ "traits": { "smithy.api#jsonName": "videoPid" } + }, + "AribCaptionsPid": { + "target": "com.amazonaws.medialive#__integer", + "traits": { + "smithy.api#jsonName": "aribCaptionsPid" + } + }, + "DvbTeletextPids": { + "target": "com.amazonaws.medialive#__listOf__integer", + "traits": { + "smithy.api#jsonName": "dvbTeletextPids" + } + }, + "EcmPid": { + "target": "com.amazonaws.medialive#__integer", + "traits": { + "smithy.api#jsonName": "ecmPid" + } + }, + "Smpte2038Pid": { + "target": "com.amazonaws.medialive#__integer", + "traits": { + "smithy.api#jsonName": "smpte2038Pid" + } } }, "traits": { @@ -30512,6 +30548,12 @@ "smithy.api#documentation": "Name of the multiplex.", "smithy.api#jsonName": "name" } + }, + "PacketIdentifiersMapping": { + "target": "com.amazonaws.medialive#MultiplexPacketIdentifiersMapping", + "traits": { + "smithy.api#jsonName": "packetIdentifiersMapping" + } } }, "traits": { diff --git a/models/mediapackagev2.json b/models/mediapackagev2.json index 74d29edc6f..86781cc506 100644 --- a/models/mediapackagev2.json +++ b/models/mediapackagev2.json @@ -2191,6 +2191,12 @@ "traits": { "smithy.api#enumValue": "WIDEVINE" } + }, + "IRDETO": { + "target": 
"smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IRDETO" + } } } }, diff --git a/models/medical-imaging.json b/models/medical-imaging.json index 4d0db1b204..26553e0db3 100644 --- a/models/medical-imaging.json +++ b/models/medical-imaging.json @@ -806,6 +806,16 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.medicalimaging#CopiableAttributes": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 260000 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.medicalimaging#CopyDestinationImageSet": { "type": "structure", "members": { @@ -969,6 +979,13 @@ "smithy.api#httpPayload": {}, "smithy.api#required": {} } + }, + "force": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Setting this flag will force the CopyImageSet operation, even if Patient, Study, or Series level\n metadata are mismatched across the sourceImageSet and destinationImageSet.

", + "smithy.api#httpQuery": "force" + } } }, "traits": { @@ -1013,6 +1030,12 @@ "smithy.api#documentation": "

The latest version identifier for the source image set.

", "smithy.api#required": {} } + }, + "DICOMCopies": { + "target": "com.amazonaws.medicalimaging#MetadataCopies", + "traits": { + "smithy.api#documentation": "

Contains MetadataCopies structure and wraps information related to specific copy use cases.\n For example, when copying subsets.

" + } } }, "traits": { @@ -1173,7 +1196,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 16 + "max": 256 }, "smithy.api#sensitive": {} } @@ -1345,7 +1368,7 @@ "smithy.api#default": 0, "smithy.api#range": { "min": 0, - "max": 10000 + "max": 1000000 } } }, @@ -1364,7 +1387,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 64 + "max": 256 }, "smithy.api#sensitive": {} } @@ -1404,9 +1427,9 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 64 + "max": 256 }, - "smithy.api#pattern": "^(?:[1-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*$", + "smithy.api#pattern": "^(?:[0-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*$", "smithy.api#sensitive": {} } }, @@ -1486,9 +1509,9 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 64 + "max": 256 }, - "smithy.api#pattern": "^(?:[1-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*$", + "smithy.api#pattern": "^(?:[0-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*$", "smithy.api#sensitive": {} } }, @@ -2466,6 +2489,12 @@ "traits": { "smithy.api#documentation": "

The Amazon Resource Name (ARN) assigned to the image set.

" } + }, + "overrides": { + "target": "com.amazonaws.medicalimaging#Overrides", + "traits": { + "smithy.api#documentation": "

This object contains the details of any overrides used while creating a specific image set version.\n If an image set was copied or updated using the force flag, this object will contain the\n forced flag.

" + } } }, "traits": { @@ -2564,6 +2593,12 @@ "traits": { "smithy.api#documentation": "

The error message thrown if an image set action fails.

" } + }, + "overrides": { + "target": "com.amazonaws.medicalimaging#Overrides", + "traits": { + "smithy.api#documentation": "

Contains details on overrides used when creating the returned version of an image set.\n For example, if forced exists, the forced flag was used when \n creating the image set.

" + } } }, "traits": { @@ -3185,6 +3220,21 @@ "smithy.api#pattern": "^[\\w -:]+$" } }, + "com.amazonaws.medicalimaging#MetadataCopies": { + "type": "structure", + "members": { + "copiableAttributes": { + "target": "com.amazonaws.medicalimaging#CopiableAttributes", + "traits": { + "smithy.api#documentation": "

The JSON string used to specify a subset of SOP Instances to copy from source to destination image set.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains copiable Attributes structure and wraps information related to specific copy use cases.\n For example, when copying subsets.

" + } + }, "com.amazonaws.medicalimaging#MetadataUpdates": { "type": "union", "members": { @@ -3193,6 +3243,12 @@ "traits": { "smithy.api#documentation": "

The object containing removableAttributes and updatableAttributes.

" } + }, + "revertToVersionId": { + "target": "com.amazonaws.medicalimaging#ImageSetExternalVersionId", + "traits": { + "smithy.api#documentation": "

Specifies the previous image set version ID to revert the current image set back to.

\n \n

You must provide either revertToVersionId or DICOMUpdates in your request. A \n ValidationException error is thrown if both parameters are provided at the same time.

\n
" + } } }, "traits": { @@ -3226,6 +3282,20 @@ } } }, + "com.amazonaws.medicalimaging#Overrides": { + "type": "structure", + "members": { + "forced": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Setting this flag will force the CopyImageSet and UpdateImageSetMetadata\n operations, even if Patient, Study, or Series level metadata are mismatched.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the overrides used in image set modification calls to CopyImageSet and \n UpdateImageSetMetadata.

" + } + }, "com.amazonaws.medicalimaging#PayloadBlob": { "type": "blob", "traits": { @@ -4005,6 +4075,13 @@ "smithy.api#required": {} } }, + "force": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Setting this flag will force the UpdateImageSetMetadata operation for the following attributes:

\n
    \n
  • \n

    \n Tag.StudyInstanceUID, Tag.SeriesInstanceUID, Tag.SOPInstanceUID, and Tag.StudyID\n

    \n
  • \n
  • \n

    Adding, removing, or updating private tags for an individual SOP Instance

    \n
  • \n
", + "smithy.api#httpQuery": "force" + } + }, "updateImageSetMetadataUpdates": { "target": "com.amazonaws.medicalimaging#MetadataUpdates", "traits": { diff --git a/models/memorydb.json b/models/memorydb.json index 97b84e0482..a3376ad257 100644 --- a/models/memorydb.json +++ b/models/memorydb.json @@ -362,7 +362,7 @@ "name": "memorydb" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

MemoryDB for Redis is a fully managed, Redis-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures.\n \n MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis, a popular open source data store, enabling you to leverage Redis’ flexible and friendly data structures, APIs, and commands.

", + "smithy.api#documentation": "

MemoryDB is a fully managed, Redis OSS-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures.\n \n MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis OSS, a popular open source data store, enabling you to leverage Redis OSS’ flexible and friendly data structures, APIs, and commands.

", "smithy.api#title": "Amazon MemoryDB", "smithy.api#xmlNamespace": { "uri": "http://memorydb.amazonaws.com/doc/2021-01-01/" @@ -409,7 +409,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -452,7 +451,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -465,7 +465,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -479,7 +478,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -502,7 +500,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -537,7 +534,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -548,14 +544,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -569,14 +567,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -585,11 +581,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -600,14 +596,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -621,7 +619,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -641,7 +638,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -652,14 +648,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -697,9 +695,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -1464,13 +1464,13 @@ "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "

The Redis engine version used by the cluster

" + "smithy.api#documentation": "

The Redis OSS engine version used by the cluster

" } }, "EnginePatchVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "

The Redis engine patch version used by the cluster

" + "smithy.api#documentation": "

The Redis OSS engine patch version used by the cluster

" } }, "ParameterGroupName": { @@ -1609,7 +1609,7 @@ "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "

The Redis engine version used by the cluster

" + "smithy.api#documentation": "

The Redis OSS engine version used by the cluster

" } }, "MaintenanceWindow": { @@ -2111,7 +2111,7 @@ "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "

The version number of the Redis engine to be used for the cluster.

" + "smithy.api#documentation": "

The version number of the Redis OSS engine to be used for the cluster.

" } }, "AutoMinorVersionUpgrade": { @@ -2593,7 +2593,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a cluster. It also deletes all associated nodes and node endpoints

" + "smithy.api#documentation": "

Deletes a cluster. It also deletes all associated nodes and node endpoints

\n \n

\n CreateSnapshot permission is required to create a final snapshot. \n Without this permission, the API call will fail with an Access Denied exception.

\n
" } }, "com.amazonaws.memorydb#DeleteClusterRequest": { @@ -3027,7 +3027,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of the available Redis engine versions.

", + "smithy.api#documentation": "

Returns a list of the available Redis OSS engine versions.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3042,7 +3042,7 @@ "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "

The Redis engine version

" + "smithy.api#documentation": "

The Redis OSS engine version

" } }, "ParameterGroupFamily": { @@ -3946,7 +3946,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides details of the Redis engine version

" + "smithy.api#documentation": "

Provides details of the Redis OSS engine version

" } }, "com.amazonaws.memorydb#EngineVersionInfoList": { diff --git a/models/mobile.json b/models/mobile.json deleted file mode 100644 index 86d8956971..0000000000 --- a/models/mobile.json +++ /dev/null @@ -1,1922 +0,0 @@ -{ - "smithy": "2.0", - "metadata": { - "suppressions": [ - { - "id": "HttpMethodSemantics", - "namespace": "*" - }, - { - "id": "HttpResponseCodeSemantics", - "namespace": "*" - }, - { - "id": "PaginatedTrait", - "namespace": "*" - }, - { - "id": "HttpHeaderTrait", - "namespace": "*" - }, - { - "id": "HttpUriConflict", - "namespace": "*" - }, - { - "id": "Service", - "namespace": "*" - } - ] - }, - "shapes": { - "com.amazonaws.mobile#AWSMobileService": { - "type": "service", - "version": "2017-07-01", - "operations": [ - { - "target": "com.amazonaws.mobile#CreateProject" - }, - { - "target": "com.amazonaws.mobile#DeleteProject" - }, - { - "target": "com.amazonaws.mobile#DescribeBundle" - }, - { - "target": "com.amazonaws.mobile#DescribeProject" - }, - { - "target": "com.amazonaws.mobile#ExportBundle" - }, - { - "target": "com.amazonaws.mobile#ExportProject" - }, - { - "target": "com.amazonaws.mobile#ListBundles" - }, - { - "target": "com.amazonaws.mobile#ListProjects" - }, - { - "target": "com.amazonaws.mobile#UpdateProject" - } - ], - "traits": { - "aws.api#service": { - "sdkId": "Mobile", - "arnNamespace": "awsmobilehubservice", - "cloudFormationName": "Mobile", - "cloudTrailEventSource": "mobile.amazonaws.com", - "endpointPrefix": "mobile" - }, - "aws.auth#sigv4": { - "name": "AWSMobileHubService" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

\n AWS Mobile Service provides mobile app and website developers with capabilities\n required to configure AWS resources and bootstrap their developer desktop projects\n with the necessary SDKs, constants, tools and samples to make use of those resources.\n

", - "smithy.api#title": "AWS Mobile", - "smithy.api#xmlNamespace": { - "uri": "http://mobile.amazonaws.com" - }, - "smithy.rules#endpointRuleSet": { - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": 
"aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mobile-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mobile-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - 
"ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mobile.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://mobile.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] - }, - "smithy.rules#endpointTests": { - "testCases": [ - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mobile.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://mobile-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mobile.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mobile.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://mobile.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": 
"DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "Missing region", - "expect": { - "error": "Invalid Configuration: Missing Region" - } - } - ], - "version": "1.0" - } - } - }, - "com.amazonaws.mobile#AccountActionRequiredException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - 
"traits": { - "smithy.api#documentation": "

\n Account Action is required in order to continue the request.\n

", - "smithy.api#error": "client", - "smithy.api#httpError": 403 - } - }, - "com.amazonaws.mobile#AttributeKey": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Key part of key-value attribute pairs.\n

" - } - }, - "com.amazonaws.mobile#AttributeValue": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Value part of key-value attribute pairs.\n

" - } - }, - "com.amazonaws.mobile#Attributes": { - "type": "map", - "key": { - "target": "com.amazonaws.mobile#AttributeKey" - }, - "value": { - "target": "com.amazonaws.mobile#AttributeValue" - }, - "traits": { - "smithy.api#documentation": "

\n Key-value attribute pairs.\n

" - } - }, - "com.amazonaws.mobile#BadRequestException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

\n The request cannot be processed because some parameter is not valid or the project\n state prevents the operation from being performed.\n

", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.mobile#Boolean": { - "type": "boolean", - "traits": { - "smithy.api#default": false - } - }, - "com.amazonaws.mobile#BundleDescription": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Description of the download bundle.\n

" - } - }, - "com.amazonaws.mobile#BundleDetails": { - "type": "structure", - "members": { - "bundleId": { - "target": "com.amazonaws.mobile#BundleId" - }, - "title": { - "target": "com.amazonaws.mobile#BundleTitle" - }, - "version": { - "target": "com.amazonaws.mobile#BundleVersion" - }, - "description": { - "target": "com.amazonaws.mobile#BundleDescription" - }, - "iconUrl": { - "target": "com.amazonaws.mobile#IconUrl" - }, - "availablePlatforms": { - "target": "com.amazonaws.mobile#Platforms" - } - }, - "traits": { - "smithy.api#documentation": "

\n The details of the bundle.\n

" - } - }, - "com.amazonaws.mobile#BundleId": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Unique bundle identifier.\n

" - } - }, - "com.amazonaws.mobile#BundleList": { - "type": "list", - "member": { - "target": "com.amazonaws.mobile#BundleDetails" - }, - "traits": { - "smithy.api#documentation": "

\n A list of bundles.\n

" - } - }, - "com.amazonaws.mobile#BundleTitle": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Title of the download bundle.\n

" - } - }, - "com.amazonaws.mobile#BundleVersion": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Version of the download bundle.\n

" - } - }, - "com.amazonaws.mobile#ConsoleUrl": { - "type": "string" - }, - "com.amazonaws.mobile#Contents": { - "type": "blob", - "traits": { - "smithy.api#documentation": "

\n Binary file data.\n

" - } - }, - "com.amazonaws.mobile#CreateProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#CreateProjectRequest" - }, - "output": { - "target": "com.amazonaws.mobile#CreateProjectResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#LimitExceededException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "

\n Creates an AWS Mobile Hub project.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/projects", - "code": 200 - } - } - }, - "com.amazonaws.mobile#CreateProjectRequest": { - "type": "structure", - "members": { - "name": { - "target": "com.amazonaws.mobile#ProjectName", - "traits": { - "smithy.api#documentation": "

\n Name of the project.\n

", - "smithy.api#httpQuery": "name" - } - }, - "region": { - "target": "com.amazonaws.mobile#ProjectRegion", - "traits": { - "smithy.api#documentation": "

\n Default region where project resources should be created.\n

", - "smithy.api#httpQuery": "region" - } - }, - "contents": { - "target": "com.amazonaws.mobile#Contents", - "traits": { - "smithy.api#documentation": "

\n ZIP or YAML file which contains configuration settings to be used when creating\n the project. This may be the contents of the file downloaded from the URL provided\n in an export project operation.\n

", - "smithy.api#httpPayload": {} - } - }, - "snapshotId": { - "target": "com.amazonaws.mobile#SnapshotId", - "traits": { - "smithy.api#documentation": "

\n Unique identifier for an exported snapshot of project configuration. This\n snapshot identifier is included in the share URL when a project is exported.\n

", - "smithy.api#httpQuery": "snapshotId" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Request structure used to request a project be created.\n

", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#CreateProjectResult": { - "type": "structure", - "members": { - "details": { - "target": "com.amazonaws.mobile#ProjectDetails", - "traits": { - "smithy.api#documentation": "

\n Detailed information about the created AWS Mobile Hub project.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Result structure used in response to a request to create a project.\n

", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#Date": { - "type": "timestamp" - }, - "com.amazonaws.mobile#DeleteProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#DeleteProjectRequest" - }, - "output": { - "target": "com.amazonaws.mobile#DeleteProjectResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "

\n Delets a project in AWS Mobile Hub.\n

", - "smithy.api#http": { - "method": "DELETE", - "uri": "/projects/{projectId}", - "code": 200 - } - } - }, - "com.amazonaws.mobile#DeleteProjectRequest": { - "type": "structure", - "members": { - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "

\n Unique project identifier.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Request structure used to request a project be deleted.\n

", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#DeleteProjectResult": { - "type": "structure", - "members": { - "deletedResources": { - "target": "com.amazonaws.mobile#Resources", - "traits": { - "smithy.api#documentation": "

\n Resources which were deleted.\n

" - } - }, - "orphanedResources": { - "target": "com.amazonaws.mobile#Resources", - "traits": { - "smithy.api#documentation": "

\n Resources which were not deleted, due to a risk of losing potentially\n important data or files.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Result structure used in response to request to delete a project.\n

", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#DescribeBundle": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#DescribeBundleRequest" - }, - "output": { - "target": "com.amazonaws.mobile#DescribeBundleResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "

\n Get the bundle details for the requested bundle id.\n

", - "smithy.api#http": { - "method": "GET", - "uri": "/bundles/{bundleId}", - "code": 200 - } - } - }, - "com.amazonaws.mobile#DescribeBundleRequest": { - "type": "structure", - "members": { - "bundleId": { - "target": "com.amazonaws.mobile#BundleId", - "traits": { - "smithy.api#documentation": "

\n Unique bundle identifier.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Request structure to request the details of a specific bundle.\n

", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#DescribeBundleResult": { - "type": "structure", - "members": { - "details": { - "target": "com.amazonaws.mobile#BundleDetails", - "traits": { - "smithy.api#documentation": "

\n The details of the bundle.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Result structure contains the details of the bundle.\n

", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#DescribeProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#DescribeProjectRequest" - }, - "output": { - "target": "com.amazonaws.mobile#DescribeProjectResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "

\n Gets details about a project in AWS Mobile Hub.\n

", - "smithy.api#http": { - "method": "GET", - "uri": "/project", - "code": 200 - } - } - }, - "com.amazonaws.mobile#DescribeProjectRequest": { - "type": "structure", - "members": { - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "

\n Unique project identifier.\n

", - "smithy.api#httpQuery": "projectId", - "smithy.api#required": {} - } - }, - "syncFromResources": { - "target": "com.amazonaws.mobile#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "

\n If set to true, causes AWS Mobile Hub to synchronize information from other services, e.g., update state of AWS CloudFormation stacks in the AWS Mobile Hub project.\n

", - "smithy.api#httpQuery": "syncFromResources" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Request structure used to request details about a project.\n

", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#DescribeProjectResult": { - "type": "structure", - "members": { - "details": { - "target": "com.amazonaws.mobile#ProjectDetails" - } - }, - "traits": { - "smithy.api#documentation": "

\n Result structure used for requests of project details.\n

", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#DownloadUrl": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n The download Url.\n

" - } - }, - "com.amazonaws.mobile#ErrorMessage": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n The Exception Error Message.\n

" - } - }, - "com.amazonaws.mobile#ExportBundle": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#ExportBundleRequest" - }, - "output": { - "target": "com.amazonaws.mobile#ExportBundleResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "

\n Generates customized software development kit (SDK) and or tool packages\n used to integrate mobile web or mobile app clients with backend AWS resources.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/bundles/{bundleId}", - "code": 200 - } - } - }, - "com.amazonaws.mobile#ExportBundleRequest": { - "type": "structure", - "members": { - "bundleId": { - "target": "com.amazonaws.mobile#BundleId", - "traits": { - "smithy.api#documentation": "

\n Unique bundle identifier.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "

\n Unique project identifier.\n

", - "smithy.api#httpQuery": "projectId" - } - }, - "platform": { - "target": "com.amazonaws.mobile#Platform", - "traits": { - "smithy.api#documentation": "

\n Developer desktop or target application platform.\n

", - "smithy.api#httpQuery": "platform" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Request structure used to request generation of custom SDK and tool packages\n required to integrate mobile web or app clients with backed AWS resources.\n

", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#ExportBundleResult": { - "type": "structure", - "members": { - "downloadUrl": { - "target": "com.amazonaws.mobile#DownloadUrl", - "traits": { - "smithy.api#documentation": "

\n URL which contains the custom-generated SDK and tool packages used\n to integrate the client mobile app or web app with the AWS resources\n created by the AWS Mobile Hub project.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Result structure which contains link to download custom-generated SDK and\n tool packages used to integrate mobile web or app clients with backed\n AWS resources.\n

", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#ExportProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#ExportProjectRequest" - }, - "output": { - "target": "com.amazonaws.mobile#ExportProjectResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "

\n Exports project configuration to a snapshot which can be downloaded and shared.\n Note that mobile app push credentials are encrypted in exported projects, so they\n can only be shared successfully within the same AWS account.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/exports/{projectId}", - "code": 200 - } - } - }, - "com.amazonaws.mobile#ExportProjectRequest": { - "type": "structure", - "members": { - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "

\n Unique project identifier.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Request structure used in requests to export project configuration details.\n

", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#ExportProjectResult": { - "type": "structure", - "members": { - "downloadUrl": { - "target": "com.amazonaws.mobile#DownloadUrl", - "traits": { - "smithy.api#documentation": "

\n URL which can be used to download the exported project configuation file(s).\n

" - } - }, - "shareUrl": { - "target": "com.amazonaws.mobile#ShareUrl", - "traits": { - "smithy.api#documentation": "

\n URL which can be shared to allow other AWS users to create their own project\n in AWS Mobile Hub with the same configuration as the specified project. This\n URL pertains to a snapshot in time of the project configuration that is created\n when this API is called. If you want to share additional changes to your project\n configuration, then you will need to create and share a new snapshot by calling\n this method again.\n

" - } - }, - "snapshotId": { - "target": "com.amazonaws.mobile#SnapshotId", - "traits": { - "smithy.api#documentation": "

\n Unique identifier for the exported snapshot of the project configuration. This\n snapshot identifier is included in the share URL.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Result structure used for requests to export project configuration details.\n

", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#Feature": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Identifies which feature in AWS Mobile Hub is associated with this AWS resource.\n

" - } - }, - "com.amazonaws.mobile#IconUrl": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Icon for the download bundle.\n

" - } - }, - "com.amazonaws.mobile#InternalFailureException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

\n The service has encountered an unexpected error condition which prevents it from\n servicing the request.\n

", - "smithy.api#error": "server", - "smithy.api#httpError": 500 - } - }, - "com.amazonaws.mobile#LimitExceededException": { - "type": "structure", - "members": { - "retryAfterSeconds": { - "target": "com.amazonaws.mobile#ErrorMessage", - "traits": { - "smithy.api#httpHeader": "Retry-After" - } - }, - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

\n There are too many AWS Mobile Hub projects in the account or the account has\n exceeded the maximum number of resources in some AWS service. You should create\n another sub-account using AWS Organizations or remove some resources and retry\n your request.\n

", - "smithy.api#error": "client", - "smithy.api#httpError": 429 - } - }, - "com.amazonaws.mobile#ListBundles": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#ListBundlesRequest" - }, - "output": { - "target": "com.amazonaws.mobile#ListBundlesResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "

\n List all available bundles.\n

", - "smithy.api#http": { - "method": "GET", - "uri": "/bundles", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults" - } - } - }, - "com.amazonaws.mobile#ListBundlesRequest": { - "type": "structure", - "members": { - "maxResults": { - "target": "com.amazonaws.mobile#MaxResults", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n Maximum number of records to list in a single response.\n

", - "smithy.api#httpQuery": "maxResults" - } - }, - "nextToken": { - "target": "com.amazonaws.mobile#NextToken", - "traits": { - "smithy.api#documentation": "

\n Pagination token. Set to null to start listing bundles from start.\n If non-null pagination token is returned in a result, then pass its\n value in here in another request to list more bundles.\n

", - "smithy.api#httpQuery": "nextToken" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Request structure to request all available bundles.\n

", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#ListBundlesResult": { - "type": "structure", - "members": { - "bundleList": { - "target": "com.amazonaws.mobile#BundleList", - "traits": { - "smithy.api#documentation": "

\n A list of bundles.\n

" - } - }, - "nextToken": { - "target": "com.amazonaws.mobile#NextToken", - "traits": { - "smithy.api#documentation": "

\n Pagination token. If non-null pagination token is returned in a result,\n then pass its value in another request to fetch more entries.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Result structure contains a list of all available bundles with details.\n

", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#ListProjects": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#ListProjectsRequest" - }, - "output": { - "target": "com.amazonaws.mobile#ListProjectsResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "

\n Lists projects in AWS Mobile Hub.\n

", - "smithy.api#http": { - "method": "GET", - "uri": "/projects", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults" - } - } - }, - "com.amazonaws.mobile#ListProjectsRequest": { - "type": "structure", - "members": { - "maxResults": { - "target": "com.amazonaws.mobile#MaxResults", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n Maximum number of records to list in a single response.\n

", - "smithy.api#httpQuery": "maxResults" - } - }, - "nextToken": { - "target": "com.amazonaws.mobile#NextToken", - "traits": { - "smithy.api#documentation": "

\n Pagination token. Set to null to start listing projects from start.\n If non-null pagination token is returned in a result, then pass its\n value in here in another request to list more projects.\n

", - "smithy.api#httpQuery": "nextToken" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Request structure used to request projects list in AWS Mobile Hub.\n

", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#ListProjectsResult": { - "type": "structure", - "members": { - "projects": { - "target": "com.amazonaws.mobile#ProjectSummaries" - }, - "nextToken": { - "target": "com.amazonaws.mobile#NextToken" - } - }, - "traits": { - "smithy.api#documentation": "

\n Result structure used for requests to list projects in AWS Mobile Hub.\n

", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#MaxResults": { - "type": "integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n Maximum number of records to list in a single response.\n

" - } - }, - "com.amazonaws.mobile#NextToken": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Pagination token. Set to null to start listing records from start.\n If non-null pagination token is returned in a result, then pass its\n value in here in another request to list more entries.\n

" - } - }, - "com.amazonaws.mobile#NotFoundException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

\n No entity can be found with the specified identifier.\n

", - "smithy.api#error": "client", - "smithy.api#httpError": 404 - } - }, - "com.amazonaws.mobile#Platform": { - "type": "enum", - "members": { - "OSX": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "OSX" - } - }, - "WINDOWS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "WINDOWS" - } - }, - "LINUX": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LINUX" - } - }, - "OBJC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "OBJC" - } - }, - "SWIFT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SWIFT" - } - }, - "ANDROID": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ANDROID" - } - }, - "JAVASCRIPT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "JAVASCRIPT" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Developer desktop or target mobile app or website platform.\n

" - } - }, - "com.amazonaws.mobile#Platforms": { - "type": "list", - "member": { - "target": "com.amazonaws.mobile#Platform" - }, - "traits": { - "smithy.api#documentation": "

\n Developer desktop or mobile app or website platforms.\n

" - } - }, - "com.amazonaws.mobile#ProjectDetails": { - "type": "structure", - "members": { - "name": { - "target": "com.amazonaws.mobile#ProjectName" - }, - "projectId": { - "target": "com.amazonaws.mobile#ProjectId" - }, - "region": { - "target": "com.amazonaws.mobile#ProjectRegion" - }, - "state": { - "target": "com.amazonaws.mobile#ProjectState" - }, - "createdDate": { - "target": "com.amazonaws.mobile#Date", - "traits": { - "smithy.api#documentation": "

\n Date the project was created.\n

" - } - }, - "lastUpdatedDate": { - "target": "com.amazonaws.mobile#Date", - "traits": { - "smithy.api#documentation": "

\n Date of the last modification of the project.\n

" - } - }, - "consoleUrl": { - "target": "com.amazonaws.mobile#ConsoleUrl", - "traits": { - "smithy.api#documentation": "

\n Website URL for this project in the AWS Mobile Hub console.\n

" - } - }, - "resources": { - "target": "com.amazonaws.mobile#Resources" - } - }, - "traits": { - "smithy.api#documentation": "

\n Detailed information about an AWS Mobile Hub project.\n

" - } - }, - "com.amazonaws.mobile#ProjectId": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Unique project identifier.\n

" - } - }, - "com.amazonaws.mobile#ProjectName": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Name of the project.\n

" - } - }, - "com.amazonaws.mobile#ProjectRegion": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Default region to use for AWS resource creation in the AWS Mobile Hub project.\n

" - } - }, - "com.amazonaws.mobile#ProjectState": { - "type": "enum", - "members": { - "NORMAL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NORMAL" - } - }, - "SYNCING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SYNCING" - } - }, - "IMPORTING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IMPORTING" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Synchronization state for a project.\n

" - } - }, - "com.amazonaws.mobile#ProjectSummaries": { - "type": "list", - "member": { - "target": "com.amazonaws.mobile#ProjectSummary" - }, - "traits": { - "smithy.api#documentation": "

\n List of projects.\n

" - } - }, - "com.amazonaws.mobile#ProjectSummary": { - "type": "structure", - "members": { - "name": { - "target": "com.amazonaws.mobile#ProjectName", - "traits": { - "smithy.api#documentation": "

\n Name of the project.\n

" - } - }, - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "

\n Unique project identifier.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Summary information about an AWS Mobile Hub project.\n

" - } - }, - "com.amazonaws.mobile#Resource": { - "type": "structure", - "members": { - "type": { - "target": "com.amazonaws.mobile#ResourceType" - }, - "name": { - "target": "com.amazonaws.mobile#ResourceName" - }, - "arn": { - "target": "com.amazonaws.mobile#ResourceArn" - }, - "feature": { - "target": "com.amazonaws.mobile#Feature" - }, - "attributes": { - "target": "com.amazonaws.mobile#Attributes" - } - }, - "traits": { - "smithy.api#documentation": "

\n Information about an instance of an AWS resource associated with a project.\n

" - } - }, - "com.amazonaws.mobile#ResourceArn": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n AWS resource name which uniquely identifies the resource in AWS systems.\n

" - } - }, - "com.amazonaws.mobile#ResourceName": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Name of the AWS resource (e.g., for an Amazon S3 bucket this is the name of the bucket).\n

" - } - }, - "com.amazonaws.mobile#ResourceType": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Simplified name for type of AWS resource (e.g., bucket is an Amazon S3 bucket).\n

" - } - }, - "com.amazonaws.mobile#Resources": { - "type": "list", - "member": { - "target": "com.amazonaws.mobile#Resource" - }, - "traits": { - "smithy.api#documentation": "

\n List of AWS resources associated with a project.\n

" - } - }, - "com.amazonaws.mobile#ServiceUnavailableException": { - "type": "structure", - "members": { - "retryAfterSeconds": { - "target": "com.amazonaws.mobile#ErrorMessage", - "traits": { - "smithy.api#httpHeader": "Retry-After" - } - }, - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

\n The service is temporarily unavailable. The request should be retried after some\n time delay.\n

", - "smithy.api#error": "server", - "smithy.api#httpError": 503 - } - }, - "com.amazonaws.mobile#ShareUrl": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n URL which can be shared to allow other AWS users to create their own project\n in AWS Mobile Hub with the same configuration as the specified project. This\n URL pertains to a snapshot in time of the project configuration that is created\n when this API is called. If you want to share additional changes to your project\n configuration, then you will need to create and share a new snapshot by calling\n this method again.\n

" - } - }, - "com.amazonaws.mobile#SnapshotId": { - "type": "string", - "traits": { - "smithy.api#documentation": "

\n Unique identifier for the exported snapshot of the project configuration. This\n snapshot identifier is included in the share URL.\n

" - } - }, - "com.amazonaws.mobile#TooManyRequestsException": { - "type": "structure", - "members": { - "retryAfterSeconds": { - "target": "com.amazonaws.mobile#ErrorMessage", - "traits": { - "smithy.api#httpHeader": "Retry-After" - } - }, - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

\n Too many requests have been received for this AWS account in too short a time. The\n request should be retried after some time delay.\n

", - "smithy.api#error": "client", - "smithy.api#httpError": 429 - } - }, - "com.amazonaws.mobile#UnauthorizedException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

\n Credentials of the caller are insufficient to authorize the request.\n

", - "smithy.api#error": "client", - "smithy.api#httpError": 401 - } - }, - "com.amazonaws.mobile#UpdateProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#UpdateProjectRequest" - }, - "output": { - "target": "com.amazonaws.mobile#UpdateProjectResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#AccountActionRequiredException" - }, - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#LimitExceededException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "

\n Update an existing project.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/update", - "code": 200 - } - } - }, - "com.amazonaws.mobile#UpdateProjectRequest": { - "type": "structure", - "members": { - "contents": { - "target": "com.amazonaws.mobile#Contents", - "traits": { - "smithy.api#documentation": "

\n ZIP or YAML file which contains project configuration to be updated. This should\n be the contents of the file downloaded from the URL provided in an export project\n operation.\n

", - "smithy.api#httpPayload": {} - } - }, - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "

\n Unique project identifier.\n

", - "smithy.api#httpQuery": "projectId", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Request structure used for requests to update project configuration.\n

", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#UpdateProjectResult": { - "type": "structure", - "members": { - "details": { - "target": "com.amazonaws.mobile#ProjectDetails", - "traits": { - "smithy.api#documentation": "

\n Detailed information about the updated AWS Mobile Hub project.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Result structure used for requests to updated project configuration.\n

", - "smithy.api#output": {} - } - } - } -} diff --git a/models/neptune-graph.json b/models/neptune-graph.json index effea5b89e..03a6260575 100644 --- a/models/neptune-graph.json +++ b/models/neptune-graph.json @@ -1534,6 +1534,17 @@ "smithy.api#pattern": "^arn:.+$" } }, + "com.amazonaws.neptunegraph#BlankNodeHandling": { + "type": "enum", + "members": { + "CONVERT_TO_IRI": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "convertToIri" + } + } + } + }, "com.amazonaws.neptunegraph#CancelImportTask": { "type": "operation", "input": { @@ -2187,6 +2198,12 @@ "smithy.api#documentation": "

Specifies the format of S3 data to be imported. Valid values are CSV, which identifies\n the Gremlin\n CSV format or OPENCYPHER, which identies the openCypher\n load format.

" } }, + "blankNodeHandling": { + "target": "com.amazonaws.neptunegraph#BlankNodeHandling", + "traits": { + "smithy.api#documentation": "

The method to handle blank nodes in the dataset. Currently, only convertToIri is supported, \n meaning blank nodes are converted to unique IRIs at load time. Must be provided when format is ntriples. \n For more information, see Handling RDF values.

" + } + }, "roleArn": { "target": "com.amazonaws.neptunegraph#RoleArn", "traits": { @@ -2225,7 +2242,7 @@ "format": { "target": "com.amazonaws.neptunegraph#Format", "traits": { - "smithy.api#documentation": "

Specifies the format of S3 data to be imported. Valid values are CSV, which identifies\n the Gremlin\n CSV format or OPENCYPHER, which identies the openCypher\n load format.

" + "smithy.api#documentation": "

Specifies the format of S3 data to be imported. Valid values are CSV, which identifies\n the Gremlin\n CSV format, OPENCYPHER, which identifies the openCypher\n load format, or ntriples, which identifies the\n RDF n-triples format.

" } }, "roleArn": { @@ -2932,6 +2949,12 @@ "traits": { "smithy.api#enumValue": "OPEN_CYPHER" } + }, + "NTRIPLES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NTRIPLES" + } } } }, @@ -5215,7 +5238,7 @@ "type": "integer", "traits": { "smithy.api#range": { - "min": 128, + "min": 32, "max": 24576 } } @@ -5912,6 +5935,12 @@ "smithy.api#documentation": "

Specifies the format of Amazon S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format or \n OPENCYPHER, which identies the openCypher load format.

" } }, + "blankNodeHandling": { + "target": "com.amazonaws.neptunegraph#BlankNodeHandling", + "traits": { + "smithy.api#documentation": "

The method to handle blank nodes in the dataset. Currently, only convertToIri is supported, \n meaning blank nodes are converted to unique IRIs at load time. Must be provided when format is ntriples. \n For more information, see Handling RDF values.

" + } + }, "graphIdentifier": { "target": "com.amazonaws.neptunegraph#GraphIdentifier", "traits": { diff --git a/models/network-firewall.json b/models/network-firewall.json index 631dc3cea1..63ff40ba38 100644 --- a/models/network-firewall.json +++ b/models/network-firewall.json @@ -804,7 +804,7 @@ "Capacity": { "target": "com.amazonaws.networkfirewall#RuleCapacity", "traits": { - "smithy.api#documentation": "

The maximum operating resources that this rule group can use. Rule group capacity is fixed at creation.\n When you update a rule group, you are limited to this capacity. When you reference a rule group\n from a firewall policy, Network Firewall reserves this capacity for the rule group.

\n

You can retrieve the capacity that would be required for a rule group before you create the rule group by calling\n CreateRuleGroup with DryRun set to TRUE.

\n \n

You can't change or exceed this capacity when you update the rule group, so leave\n room for your rule group to grow.

\n
\n

\n Capacity for a stateless rule group\n

\n

For a stateless rule group, the capacity required is the sum of the capacity\n requirements of the individual rules that you expect to have in the rule group.

\n

To calculate the capacity requirement of a single rule, multiply the capacity\n requirement values of each of the rule's match settings:

\n
    \n
  • \n

    A match setting with no criteria specified has a value of 1.

    \n
  • \n
  • \n

    A match setting with Any specified has a value of 1.

    \n
  • \n
  • \n

    All other match settings have a value equal to the number of elements provided in\n the setting. For example, a protocol setting [\"UDP\"] and a source setting\n [\"10.0.0.0/24\"] each have a value of 1. A protocol setting [\"UDP\",\"TCP\"] has a value\n of 2. A source setting [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"] has a value of 3.\n

    \n
  • \n
\n

A rule with no criteria specified in any of its match settings has a capacity\n requirement of 1. A rule with protocol setting [\"UDP\",\"TCP\"], source setting\n [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"], and a single specification or no specification\n for each of the other match settings has a capacity requirement of 6.

\n

\n Capacity for a stateful rule group\n

\n

For\n a stateful rule group, the minimum capacity required is the number of individual rules that\n you expect to have in the rule group.

", + "smithy.api#documentation": "

The maximum operating resources that this rule group can use. Rule group capacity is fixed at creation.\n When you update a rule group, you are limited to this capacity. When you reference a rule group\n from a firewall policy, Network Firewall reserves this capacity for the rule group.

\n

You can retrieve the capacity that would be required for a rule group before you create the rule group by calling\n CreateRuleGroup with DryRun set to TRUE.

\n \n

You can't change or exceed this capacity when you update the rule group, so leave\n room for your rule group to grow.

\n
\n

\n Capacity for a stateless rule group\n

\n

For a stateless rule group, the capacity required is the sum of the capacity\n requirements of the individual rules that you expect to have in the rule group.

\n

To calculate the capacity requirement of a single rule, multiply the capacity\n requirement values of each of the rule's match settings:

\n
    \n
  • \n

    A match setting with no criteria specified has a value of 1.

    \n
  • \n
  • \n

    A match setting with Any specified has a value of 1.

    \n
  • \n
  • \n

    All other match settings have a value equal to the number of elements provided in\n the setting. For example, a protocol setting [\"UDP\"] and a source setting\n [\"10.0.0.0/24\"] each have a value of 1. A protocol setting [\"UDP\",\"TCP\"] has a value\n of 2. A source setting [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"] has a value of 3.\n

    \n
  • \n
\n

A rule with no criteria specified in any of its match settings has a capacity\n requirement of 1. A rule with protocol setting [\"UDP\",\"TCP\"], source setting\n [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"], and a single specification or no specification\n for each of the other match settings has a capacity requirement of 6.

\n

\n Capacity for a stateful rule group\n

\n

For a stateful rule group, the minimum capacity required is the number of individual rules that\n you expect to have in the rule group.

", "smithy.api#required": {} } }, @@ -893,7 +893,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a Network Firewall TLS inspection configuration. A TLS inspection configuration contains associations between Certificate Manager certificates and the scope configurations that Network Firewall uses to decrypt and re-encrypt traffic traveling through your firewall.

\n

After you create a TLS inspection configuration, you can associate it with a new firewall policy.

\n

To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.

\n

To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.

\n

To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.

\n

\n For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS\ninspection configurations in the Network Firewall Developer Guide.\n

" + "smithy.api#documentation": "

Creates a Network Firewall TLS inspection configuration. Network Firewall uses TLS inspection configurations to decrypt your firewall's inbound and outbound SSL/TLS traffic. After decryption, Network Firewall inspects the traffic according to your firewall policy's stateful rules, and then re-encrypts it before sending it to its destination. You can enable inspection of your firewall's inbound traffic, outbound traffic, or both. To use TLS inspection with your firewall, you must first import or provision certificates using ACM, create a TLS inspection configuration, add that configuration to a new firewall policy, and then associate that policy with your firewall.

\n

To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.

\n

To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.

\n

To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.

\n

\n For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS\ninspection configurations in the Network Firewall Developer Guide.\n

" } }, "com.amazonaws.networkfirewall#CreateTLSInspectionConfigurationRequest": { @@ -3073,27 +3073,27 @@ "LogType": { "target": "com.amazonaws.networkfirewall#LogType", "traits": { - "smithy.api#documentation": "

The type of log to send. Alert logs report traffic that matches a StatefulRule with an action setting that sends an alert log message. Flow logs are\n standard network traffic flow logs.

", + "smithy.api#documentation": "

The type of log to record. You can record the following types of logs from your Network Firewall stateful engine.

\n
    \n
  • \n

    \n ALERT - Logs for traffic that matches your stateful rules and that have an action that sends an alert. A stateful rule sends alerts for the rule actions DROP, ALERT, and REJECT. For more information, see StatefulRule.

    \n
  • \n
  • \n

    \n FLOW - Standard network traffic flow logs. The stateful rules engine records flow logs for all network traffic that it receives. Each flow log record captures the network flow for a specific standard stateless rule group.

    \n
  • \n
  • \n

    \n TLS - Logs for events that are related to TLS inspection. For more information, see \n Inspecting SSL/TLS traffic with TLS inspection configurations \n in the Network Firewall Developer Guide.

    \n
  • \n
", "smithy.api#required": {} } }, "LogDestinationType": { "target": "com.amazonaws.networkfirewall#LogDestinationType", "traits": { - "smithy.api#documentation": "

The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket,\n a CloudWatch log group, or a Kinesis Data Firehose delivery stream.

", + "smithy.api#documentation": "

The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket,\n a CloudWatch log group, or a Firehose delivery stream.

", "smithy.api#required": {} } }, "LogDestination": { "target": "com.amazonaws.networkfirewall#LogDestinationMap", "traits": { - "smithy.api#documentation": "

The named location for the logs, provided in a key:value mapping that is specific to the\n chosen destination type.

\n
    \n
  • \n

    For an Amazon S3 bucket, provide the name of the bucket, with key bucketName,\n and optionally provide a prefix, with key prefix. The following example\n specifies an Amazon S3 bucket named\n DOC-EXAMPLE-BUCKET and the prefix alerts:

    \n

    \n \"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\"\n }\n

    \n
  • \n
  • \n

    For a CloudWatch log group, provide the name of the CloudWatch log group, with key\n logGroup. The following example specifies a log group named\n alert-log-group:

    \n

    \n \"LogDestination\": { \"logGroup\": \"alert-log-group\" }\n

    \n
  • \n
  • \n

    For a Kinesis Data Firehose delivery stream, provide the name of the delivery stream, with key\n deliveryStream. The following example specifies a delivery stream\n named alert-delivery-stream:

    \n

    \n \"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\"\n }\n

    \n
  • \n
", + "smithy.api#documentation": "

The named location for the logs, provided in a key:value mapping that is specific to the\n chosen destination type.

\n
    \n
  • \n

    For an Amazon S3 bucket, provide the name of the bucket, with key bucketName,\n and optionally provide a prefix, with key prefix.

    \n

    The following example specifies an Amazon S3 bucket named DOC-EXAMPLE-BUCKET and the prefix alerts:

    \n

    \n \"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\"\n }\n

    \n
  • \n
  • \n

    For a CloudWatch log group, provide the name of the CloudWatch log group, with key\n logGroup. The following example specifies a log group named\n alert-log-group:

    \n

    \n \"LogDestination\": { \"logGroup\": \"alert-log-group\" }\n

    \n
  • \n
  • \n

    For a Firehose delivery stream, provide the name of the delivery stream, with key\n deliveryStream. The following example specifies a delivery stream\n named alert-delivery-stream:

    \n

    \n \"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\"\n }\n

    \n
  • \n
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Defines where Network Firewall sends logs for the firewall for one log type. This is used\n in LoggingConfiguration. You can send each type of log to an Amazon S3 bucket, a CloudWatch log group, or a Kinesis Data Firehose delivery stream.

\n

Network Firewall generates logs for stateful rule groups. You can save alert and flow log\n types. The stateful rules engine records flow logs for all network traffic that it receives.\n It records alert logs for traffic that matches stateful rules that have the rule\n action set to DROP or ALERT.

" + "smithy.api#documentation": "

Defines where Network Firewall sends logs for the firewall for one log type. This is used\n in LoggingConfiguration. You can send each type of log to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream.

\n

Network Firewall generates logs for stateful rule groups. You can save alert, flow, and TLS log\n types.

" } }, "com.amazonaws.networkfirewall#LogDestinationConfigs": { @@ -3167,6 +3167,12 @@ "traits": { "smithy.api#enumValue": "FLOW" } + }, + "TLS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TLS" + } } } }, @@ -5089,7 +5095,7 @@ } }, "traits": { - "smithy.api#documentation": "

Stateful inspection criteria for a domain list rule group.

\n

For HTTPS traffic, domain filtering is SNI-based. It uses the server name indicator extension of the TLS handshake.

\n

By default, Network Firewall domain list inspection only includes traffic coming from the VPC where you deploy the firewall. To inspect traffic from IP addresses outside of the deployment VPC, you set the HOME_NET rule variable to include the CIDR range of the deployment VPC plus the other CIDR ranges. For more information, see RuleVariables in this guide and Stateful domain list rule groups in Network Firewall in the Network Firewall Developer Guide.

" + "smithy.api#documentation": "

Stateful inspection criteria for a domain list rule group.

\n

For HTTPS traffic, domain filtering is SNI-based. It uses the server name indicator extension of the TLS handshake.

\n

By default, Network Firewall domain list inspection only includes traffic coming from the VPC where you deploy the firewall. To inspect traffic from IP addresses outside of the deployment VPC, you set the HOME_NET rule variable to include the CIDR range of the deployment VPC plus the other CIDR ranges. For more information, see RuleVariables in this guide and \n Stateful domain list rule groups in Network Firewall in the Network Firewall Developer Guide.

" } }, "com.amazonaws.networkfirewall#RulesString": { @@ -5310,7 +5316,7 @@ "Action": { "target": "com.amazonaws.networkfirewall#StatefulAction", "traits": { - "smithy.api#documentation": "

Defines what Network Firewall should do with the packets in a traffic flow when the flow\n matches the stateful rule criteria. For all actions, Network Firewall performs the specified\n action and discontinues stateful inspection of the traffic flow.

\n

The actions for a stateful rule are defined as follows:

\n
    \n
  • \n

    \n PASS - Permits the packets to go to the\n intended destination.

    \n
  • \n
  • \n

    \n DROP - Blocks the packets from going to\n the intended destination and sends an alert log message, if alert logging is configured in the Firewall\n LoggingConfiguration.

    \n
  • \n
  • \n

    \n ALERT - Sends an alert log message, if alert logging is configured in the Firewall\n LoggingConfiguration.

    \n

    You can use this action to test a rule that you intend to use to drop traffic. You\n can enable the rule with ALERT action, verify in the logs that the rule\n is filtering as you want, then change the action to DROP.

    \n
  • \n
", + "smithy.api#documentation": "

Defines what Network Firewall should do with the packets in a traffic flow when the flow\n matches the stateful rule criteria. For all actions, Network Firewall performs the specified\n action and discontinues stateful inspection of the traffic flow.

\n

The actions for a stateful rule are defined as follows:

\n
    \n
  • \n

    \n PASS - Permits the packets to go to the\n intended destination.

    \n
  • \n
  • \n

    \n DROP - Blocks the packets from going to\n the intended destination and sends an alert log message, if alert logging is configured in the Firewall\n LoggingConfiguration.

    \n
  • \n
  • \n

    \n ALERT - Sends an alert log message, if alert logging is configured in the Firewall\n LoggingConfiguration.

    \n

    You can use this action to test a rule that you intend to use to drop traffic. You\n can enable the rule with ALERT action, verify in the logs that the rule\n is filtering as you want, then change the action to DROP.

    \n
  • \n
  • \n

    \n REJECT - Drops traffic that matches the conditions of the stateful rule, and sends a TCP reset packet back to the sender of the packet. A TCP reset packet is a packet with no payload and an RST bit contained in the TCP header flags. REJECT is available only for TCP traffic. This option doesn't support FTP or IMAP protocols.

    \n
  • \n
", "smithy.api#required": {} } }, diff --git a/models/outposts.json b/models/outposts.json index 832f6d74d7..ab20bd0f19 100644 --- a/models/outposts.json +++ b/models/outposts.json @@ -2140,6 +2140,12 @@ "members": { "InstanceType": { "target": "com.amazonaws.outposts#InstanceType" + }, + "VCPUs": { + "target": "com.amazonaws.outposts#VCPUCount", + "traits": { + "smithy.api#documentation": "

The number of default VCPUs in an instance type.

" + } } }, "traits": { @@ -6019,6 +6025,9 @@ } } }, + "com.amazonaws.outposts#VCPUCount": { + "type": "integer" + }, "com.amazonaws.outposts#ValidationException": { "type": "structure", "members": { diff --git a/models/pi.json b/models/pi.json index fc5fe125bc..379ea8093d 100644 --- a/models/pi.json +++ b/models/pi.json @@ -665,7 +665,7 @@ "Dimensions": { "target": "com.amazonaws.pi#SanitizedStringList", "traits": { - "smithy.api#documentation": "

A list of specific dimensions from a dimension group. If this parameter is not present,\n then it signifies that all of the dimensions in the group were requested, or are present in\n the response.

\n

Valid values for elements in the Dimensions array are:

\n
    \n
  • \n

    \n db.application.name - The name of the application that is connected to the database. Valid values are as follows:

    \n
      \n
    • \n

      Aurora PostgreSQL

      \n
    • \n
    • \n

      Amazon RDS PostgreSQL

      \n
    • \n
    • \n

      Amazon DocumentDB

      \n
    • \n
    \n
  • \n
  • \n

    \n db.host.id - The host ID of the connected client (all engines).

    \n
  • \n
  • \n

    \n db.host.name - The host name of the connected client (all engines).

    \n
  • \n
  • \n

    \n db.name - The name of the database to which the client is connected. Valid values are as follows:

    \n
      \n
    • \n

      Aurora PostgreSQL

      \n
    • \n
    • \n

      Amazon RDS PostgreSQL

      \n
    • \n
    • \n

      Aurora MySQL

      \n
    • \n
    • \n

      Amazon RDS MySQL

      \n
    • \n
    • \n

      Amazon RDS MariaDB

      \n
    • \n
    • \n

      Amazon DocumentDB

      \n
    • \n
    \n
  • \n
  • \n

    \n db.query.id - The query ID generated by Performance Insights (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.query.db_id - The query ID generated by the database (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.query.statement - The text of the query that is being run (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.query.tokenized_id\n

    \n
  • \n
  • \n

    \n db.query.tokenized.id - The query digest ID generated by Performance Insights (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.query.tokenized.db_id - The query digest ID generated by Performance Insights (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.query.tokenized.statement - The text of the query digest (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.session_type.name - The type of the current session (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.sql.id - The hash of the full, non-tokenized SQL statement generated by Performance Insights (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.sql.db_id - Either the SQL ID generated by the database engine, or a value generated by Performance Insights that begins with\n pi- (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.sql.statement - The full text of the SQL statement that is running, as in SELECT * FROM employees\n (all engines except Amazon DocumentDB)

    \n
  • \n
  • \n

    \n db.sql.tokenized_id\n

    \n
  • \n
  • \n

    \n db.sql_tokenized.id - The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). In the console,\n db.sql_tokenized.id is called the Support ID because Amazon Web Services Support can look at this data to help you troubleshoot\n database issues.

    \n
  • \n
  • \n

    \n db.sql_tokenized.db_id - Either the native database ID used to refer to the SQL statement, or a synthetic ID such as\n pi-2372568224 that Performance Insights generates if the native database ID isn't available (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.sql_tokenized.statement - The text of the SQL digest, as in SELECT * FROM employees WHERE employee_id =\n ? (all engines except Amazon DocumentDB)

    \n
  • \n
  • \n

    \n db.user.id - The ID of the user logged in to the database (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.user.name - The name of the user logged in to the database (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.wait_event.name - The event for which the backend is waiting (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.wait_event.type - The type of event for which the backend is waiting (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.wait_event_type.name - The name of the event type for which the backend is waiting (all engines except\n Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.wait_state.name - The event for which the backend is waiting (only Amazon DocumentDB).

    \n
  • \n
" + "smithy.api#documentation": "

A list of specific dimensions from a dimension group. If this parameter is not present,\n then it signifies that all of the dimensions in the group were requested, or are present in\n the response.

\n

Valid values for elements in the Dimensions array are:

\n
    \n
  • \n

    \n db.application.name - The name of the application that is connected to the database. Valid values are as follows:

    \n
      \n
    • \n

      Aurora PostgreSQL

      \n
    • \n
    • \n

      Amazon RDS PostgreSQL

      \n
    • \n
    • \n

      Amazon DocumentDB

      \n
    • \n
    \n
  • \n
  • \n

    \n db.host.id - The host ID of the connected client (all engines).

    \n
  • \n
  • \n

    \n db.host.name - The host name of the connected client (all engines).

    \n
  • \n
  • \n

    \n db.name - The name of the database to which the client is connected. Valid values are as follows:

    \n
      \n
    • \n

      Aurora PostgreSQL

      \n
    • \n
    • \n

      Amazon RDS PostgreSQL

      \n
    • \n
    • \n

      Aurora MySQL

      \n
    • \n
    • \n

      Amazon RDS MySQL

      \n
    • \n
    • \n

      Amazon RDS MariaDB

      \n
    • \n
    • \n

      Amazon DocumentDB

      \n
    • \n
    \n
  • \n
  • \n

    \n db.query.id - The query ID generated by Performance Insights (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.query.db_id - The query ID generated by the database (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.query.statement - The text of the query that is being run (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.query.tokenized_id\n

    \n
  • \n
  • \n

    \n db.query.tokenized.id - The query digest ID generated by Performance Insights (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.query.tokenized.db_id - The query digest ID generated by Performance Insights (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.query.tokenized.statement - The text of the query digest (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.session_type.name - The type of the current session (only Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.sql.id - The hash of the full, non-tokenized SQL statement generated by Performance Insights (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.sql.db_id - Either the SQL ID generated by the database engine, or a value generated by Performance Insights that begins with\n pi- (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.sql.statement - The full text of the SQL statement that is running, as in SELECT * FROM employees\n (all engines except Amazon DocumentDB)

    \n
  • \n
  • \n

    \n db.sql.tokenized_id - The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). The db.sql.tokenized_id dimension \n fetches the value of the db.sql_tokenized.id dimension. Amazon RDS returns db.sql.tokenized_id from the db.sql dimension group.\n

    \n
  • \n
  • \n

    \n db.sql_tokenized.id - The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). In the console,\n db.sql_tokenized.id is called the Support ID because Amazon Web Services Support can look at this data to help you troubleshoot\n database issues.

    \n
  • \n
  • \n

    \n db.sql_tokenized.db_id - Either the native database ID used to refer to the SQL statement, or a synthetic ID such as\n pi-2372568224 that Performance Insights generates if the native database ID isn't available (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.sql_tokenized.statement - The text of the SQL digest, as in SELECT * FROM employees WHERE employee_id =\n ? (all engines except Amazon DocumentDB)

    \n
  • \n
  • \n

    \n db.user.id - The ID of the user logged in to the database (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.user.name - The name of the user logged in to the database (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.wait_event.name - The event for which the backend is waiting (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.wait_event.type - The type of event for which the backend is waiting (all engines except Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.wait_event_type.name - The name of the event type for which the backend is waiting (all engines except\n Amazon DocumentDB).

    \n
  • \n
  • \n

    \n db.wait_state.name - The event for which the backend is waiting (only Amazon DocumentDB).

    \n
  • \n
" } }, "Limit": { diff --git a/models/pinpoint-sms-voice-v2.json b/models/pinpoint-sms-voice-v2.json index 636f1cc0e6..e5717832f6 100644 --- a/models/pinpoint-sms-voice-v2.json +++ b/models/pinpoint-sms-voice-v2.json @@ -933,7 +933,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new event destination in a configuration set.

\n

An event destination is a location where you send message events. The event options\n are Amazon CloudWatch, Amazon Kinesis Data Firehose, or Amazon SNS. For example,\n when a message is delivered successfully, you can send information about that event to\n an event destination, or send notifications to endpoints that are subscribed to an\n Amazon SNS topic.

\n

Each configuration set can contain between 0 and 5 event destinations. Each event\n destination can contain a reference to a single destination, such as a CloudWatch\n or Kinesis Data Firehose destination.

" + "smithy.api#documentation": "

Creates a new event destination in a configuration set.

\n

An event destination is a location where you send message events. The event options\n are Amazon CloudWatch, Amazon Data Firehose, or Amazon SNS. For example,\n when a message is delivered successfully, you can send information about that event to\n an event destination, or send notifications to endpoints that are subscribed to an\n Amazon SNS topic.

\n

Each configuration set can contain between 0 and 5 event destinations. Each event\n destination can contain a reference to a single destination, such as a CloudWatch\n or Firehose destination.

" } }, "com.amazonaws.pinpointsmsvoicev2#CreateEventDestinationRequest": { @@ -956,7 +956,7 @@ "MatchingEventTypes": { "target": "com.amazonaws.pinpointsmsvoicev2#EventTypeList", "traits": { - "smithy.api#documentation": "

An array of event types that determine which events to log. If \"ALL\" is used, then\n Amazon Pinpoint logs every event type.

\n \n

The TEXT_SENT event type is not supported.

\n
", + "smithy.api#documentation": "

An array of event types that determine which events to log. If \"ALL\" is used, then\n AWS End User Messaging SMS and Voice logs every event type.

\n \n

The TEXT_SENT event type is not supported.

\n
", "smithy.api#required": {} } }, @@ -969,7 +969,7 @@ "KinesisFirehoseDestination": { "target": "com.amazonaws.pinpointsmsvoicev2#KinesisFirehoseDestination", "traits": { - "smithy.api#documentation": "

An object that contains information about an event destination for logging to Amazon Kinesis Data Firehose.

" + "smithy.api#documentation": "

An object that contains information about an event destination for logging to Amazon Data Firehose.

" } }, "SnsDestination": { @@ -1045,7 +1045,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new opt-out list.

\n

If the opt-out list name already exists, an error is returned.

\n

An opt-out list is a list of phone numbers that are opted out, meaning you can't send\n SMS or voice messages to them. If an end user replies with the keyword \"STOP,\" an entry for\n the phone number is added to the opt-out list. In addition to STOP, your recipients can\n use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported\n opt-out keywords, see \n SMS opt out in the Amazon Pinpoint User\n Guide.

" + "smithy.api#documentation": "

Creates a new opt-out list.

\n

If the opt-out list name already exists, an error is returned.

\n

An opt-out list is a list of phone numbers that are opted out, meaning you can't send\n SMS or voice messages to them. If an end user replies with the keyword \"STOP,\" an entry for\n the phone number is added to the opt-out list. In addition to STOP, your recipients can\n use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported\n opt-out keywords, see \n SMS opt out in the AWS End User Messaging SMS User Guide.

" } }, "com.amazonaws.pinpointsmsvoicev2#CreateOptOutListRequest": { @@ -1149,7 +1149,7 @@ "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#PhoneOrSenderIdOrArn", "traits": { - "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used\n to get the values for SenderId and SenderIdArn.

", + "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used\n to get the values for SenderId and SenderIdArn.

\n

After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity.

", "smithy.api#required": {} } }, @@ -1163,7 +1163,7 @@ "MessageType": { "target": "com.amazonaws.pinpointsmsvoicev2#MessageType", "traits": { - "smithy.api#documentation": "

The type of message. Valid values are TRANSACTIONAL for messages that are critical or\n time-sensitive and PROMOTIONAL for messages that aren't critical or\n time-sensitive.

", + "smithy.api#documentation": "

The type of message. Valid values are TRANSACTIONAL for messages that are critical or\n time-sensitive and PROMOTIONAL for messages that aren't critical or\n time-sensitive. After the pool is created the MessageType can't be changed.

", "smithy.api#required": {} } }, @@ -1241,7 +1241,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" + "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" } }, "OptOutListName": { @@ -1254,7 +1254,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Indicates whether shared routes are enabled for the pool.

" + "smithy.api#documentation": "

Indicates whether shared routes are enabled for the pool. Set to false and only origination identities in this pool are used to send messages.

" } }, "DeletionProtectionEnabled": { @@ -2330,7 +2330,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an existing keyword from an origination phone number or pool.

\n

A keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, Amazon Pinpoint responds with a customizable\n message.

\n

Keywords \"HELP\" and \"STOP\" can't be deleted or modified.

" + "smithy.api#documentation": "

Deletes an existing keyword from an origination phone number or pool.

\n

A keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable\n message.

\n

Keywords \"HELP\" and \"STOP\" can't be deleted or modified.

" } }, "com.amazonaws.pinpointsmsvoicev2#DeleteKeywordRequest": { @@ -2703,7 +2703,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" + "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" } }, "OptOutListName": { @@ -3157,7 +3157,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an account-level monthly spending limit override for sending text messages.\n Deleting a spend limit override will set the EnforcedLimit to equal the\n MaxLimit, which is controlled by Amazon Web Services. For more\n information on spend limits (quotas) see Amazon Pinpoint quotas \n in the Amazon Pinpoint Developer Guide.

" + "smithy.api#documentation": "

Deletes an account-level monthly spending limit override for sending text messages.\n Deleting a spend limit override will set the EnforcedLimit to equal the\n MaxLimit, which is controlled by Amazon Web Services. For more\n information on spend limits (quotas) see Quotas \n in the AWS End User Messaging SMS User Guide.

" } }, "com.amazonaws.pinpointsmsvoicev2#DeleteTextMessageSpendLimitOverrideRequest": { @@ -3287,7 +3287,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an account level monthly spend limit override for sending voice messages.\n Deleting a spend limit override sets the EnforcedLimit equal to the\n MaxLimit, which is controlled by Amazon Web Services. For more\n information on spending limits (quotas) see Amazon Pinpoint quotas\n in the Amazon Pinpoint Developer Guide.

" + "smithy.api#documentation": "

Deletes an account level monthly spend limit override for sending voice messages.\n Deleting a spend limit override sets the EnforcedLimit equal to the\n MaxLimit, which is controlled by Amazon Web Services. For more\n information on spending limits (quotas) see Quotas \n in the AWS End User Messaging SMS User Guide.

" } }, "com.amazonaws.pinpointsmsvoicev2#DeleteVoiceMessageSpendLimitOverrideRequest": { @@ -3344,7 +3344,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes attributes of your Amazon Web Services account. The supported account\n attributes include account tier, which indicates whether your account is in the sandbox\n or production environment. When you're ready to move your account out of the sandbox,\n create an Amazon Web Services Support case for a service limit increase request.

\n

New Amazon Pinpoint accounts are placed into an SMS or voice sandbox. The sandbox\n protects both Amazon Web Services end recipients and SMS or voice recipients from fraud\n and abuse.

", + "smithy.api#documentation": "

Describes attributes of your Amazon Web Services account. The supported account\n attributes include account tier, which indicates whether your account is in the sandbox\n or production environment. When you're ready to move your account out of the sandbox,\n create an Amazon Web Services Support case for a service limit increase request.

\n

New accounts are placed into an SMS or voice sandbox. The sandbox\n protects both Amazon Web Services end recipients and SMS or voice recipients from fraud\n and abuse.

", "smithy.api#paginated": { "items": "AccountAttributes" } @@ -3413,7 +3413,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the current Amazon Pinpoint SMS Voice V2 resource quotas for your\n account. The description for a quota includes the quota name, current usage toward that\n quota, and the quota's maximum value.

\n

When you establish an Amazon Web Services account, the account has initial quotas on\n the maximum number of configuration sets, opt-out lists, phone numbers, and pools that\n you can create in a given Region. For more information see \n Amazon Pinpoint quotas in the Amazon Pinpoint Developer\n Guide.

", + "smithy.api#documentation": "

Describes the current AWS End User Messaging SMS and Voice SMS Voice V2 resource quotas for your\n account. The description for a quota includes the quota name, current usage toward that\n quota, and the quota's maximum value.

\n

When you establish an Amazon Web Services account, the account has initial quotas on\n the maximum number of configuration sets, opt-out lists, phone numbers, and pools that\n you can create in a given Region. For more information see Quotas \n in the AWS End User Messaging SMS User Guide.

", "smithy.api#paginated": { "items": "AccountLimits" } @@ -3569,7 +3569,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the specified keywords or all keywords on your origination phone number or\n pool.

\n

A keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, Amazon Pinpoint responds with a customizable\n message.

\n

If you specify a keyword that isn't valid, an error is returned.

", + "smithy.api#documentation": "

Describes the specified keywords or all keywords on your origination phone number or\n pool.

\n

A keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable\n message.

\n

If you specify a keyword that isn't valid, an error is returned.

", "smithy.api#paginated": { "items": "Keywords" } @@ -4849,7 +4849,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the current Amazon Pinpoint monthly spend limits for sending voice and\n text messages.

\n

When you establish an Amazon Web Services account, the account has initial monthly\n spend limit in a given Region. For more information on increasing your monthly spend\n limit, see \n Requesting increases to your monthly SMS spending quota for Amazon Pinpoint\n in the Amazon Pinpoint User Guide.

", + "smithy.api#documentation": "

Describes the current monthly spend limits for sending voice and\n text messages.

\n

When you establish an Amazon Web Services account, the account has initial monthly\n spend limit in a given Region. For more information on increasing your monthly spend\n limit, see \n Requesting increases to your monthly SMS, MMS, or Voice spending quota\n in the AWS End User Messaging SMS User Guide.

", "smithy.api#paginated": { "items": "SpendLimits" } @@ -5355,7 +5355,7 @@ "KinesisFirehoseDestination": { "target": "com.amazonaws.pinpointsmsvoicev2#KinesisFirehoseDestination", "traits": { - "smithy.api#documentation": "

An object that contains information about an event destination for logging to Amazon Kinesis Data Firehose.

" + "smithy.api#documentation": "

An object that contains information about an event destination for logging to Amazon Data Firehose.

" } }, "SnsDestination": { @@ -5366,7 +5366,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains information about an event destination.

\n

Event destinations are associated with configuration sets, which enable you to publish\n message sending events to CloudWatch, Kinesis Data Firehose, or Amazon SNS.

" + "smithy.api#documentation": "

Contains information about an event destination.

\n

Event destinations are associated with configuration sets, which enable you to publish\n message sending events to CloudWatch, Firehose, or Amazon SNS.

" } }, "com.amazonaws.pinpointsmsvoicev2#EventDestinationList": { @@ -5731,7 +5731,7 @@ "CountryRuleSet": { "target": "com.amazonaws.pinpointsmsvoicev2#ProtectConfigurationCountryRuleSet", "traits": { - "smithy.api#documentation": "

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the\n details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide.

", + "smithy.api#documentation": "

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the\n details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide.

", "smithy.api#required": {} } } @@ -5914,7 +5914,7 @@ "IamRoleArn": { "target": "com.amazonaws.pinpointsmsvoicev2#IamRoleArn", "traits": { - "smithy.api#documentation": "

The ARN of an Identity and Access Management role that is able to write\n event data to an Amazon Kinesis Data Firehose destination.

", + "smithy.api#documentation": "

The ARN of an Identity and Access Management role that is able to write\n event data to an Amazon Data Firehose destination.

", "smithy.api#required": {} } }, @@ -5927,7 +5927,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains the delivery stream Amazon Resource Name (ARN), and the ARN of the Identity and Access Management (IAM) role associated with a Kinesis Data Firehose event\n destination.

\n

Event destinations, such as Kinesis Data Firehose, are associated with configuration\n sets, which enable you to publish message sending events.

" + "smithy.api#documentation": "

Contains the delivery stream Amazon Resource Name (ARN), and the ARN of the Identity and Access Management (IAM) role associated with a Firehose event\n destination.

\n

Event destinations, such as Firehose, are associated with configuration\n sets, which enable you to publish message sending events.

" } }, "com.amazonaws.pinpointsmsvoicev2#LanguageCode": { @@ -6865,7 +6865,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

When set to false an end recipient sends a message that begins with HELP or STOP to\n one of your dedicated numbers, Amazon Pinpoint automatically replies with a\n customizable message and adds the end recipient to the OptOutList. When set to true\n you're responsible for responding to HELP and STOP requests. You're also responsible for\n tracking and honoring opt-out request. For more information see Self-managed opt-outs\n

", + "smithy.api#documentation": "

When set to false an end recipient sends a message that begins with HELP or STOP to\n one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a\n customizable message and adds the end recipient to the OptOutList. When set to true\n you're responsible for responding to HELP and STOP requests. You're also responsible for\n tracking and honoring opt-out request. For more information see Self-managed opt-outs\n

", "smithy.api#required": {} } }, @@ -7195,7 +7195,7 @@ "name": "sms-voice" }, "aws.protocols#awsJson1_0": {}, - "smithy.api#documentation": "

Welcome to the Amazon Pinpoint SMS and Voice, version 2 API Reference.\n This guide provides information about Amazon Pinpoint SMS and Voice, version 2 API\n resources, including supported HTTP methods, parameters, and schemas.

\n

Amazon Pinpoint is an Amazon Web Services service that you can use to engage with\n your recipients across multiple messaging channels. The Amazon Pinpoint SMS and\n Voice, version 2 API provides programmatic access to options that are unique to the SMS\n and voice channels. Amazon Pinpoint SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API.

\n

If you're new to Amazon Pinpoint SMS, it's also helpful to review the \n Amazon Pinpoint SMS User Guide. The Amazon Pinpoint\n Developer Guide provides tutorials, code samples, and procedures that\n demonstrate how to use Amazon Pinpoint SMS features programmatically and how to integrate\n Amazon Pinpoint functionality into mobile apps and other types of applications.\n The guide also provides key information, such as Amazon Pinpoint integration with\n other Amazon Web Services services, and the quotas that apply to use of the\n service.

\n

\n Regional availability\n

\n

The Amazon Pinpoint SMS and Voice, version 2 API Reference is\n available in several Amazon Web Services Regions and it provides an endpoint for each of\n these Regions. For a list of all the Regions and endpoints where the API is currently\n available, see Amazon Web Services Service Endpoints and Amazon Pinpoint\n endpoints and quotas in the Amazon Web Services General Reference. To\n learn more about Amazon Web Services Regions, see Managing\n Amazon Web Services Regions in the Amazon Web Services General\n Reference.

\n

In each Region, Amazon Web Services maintains multiple Availability Zones. These\n Availability Zones are physically isolated from each other, but are united by private,\n low-latency, high-throughput, and highly redundant network connections. These\n Availability Zones enable us to provide very high levels of availability and redundancy,\n while also minimizing latency. To learn more about the number of Availability Zones that\n are available in each Region, see Amazon Web Services\n Global Infrastructure.\n

", + "smithy.api#documentation": "

Welcome to the AWS End User Messaging SMS and Voice, version 2 API Reference.\n This guide provides information about AWS End User Messaging SMS and Voice, version 2 API\n resources, including supported HTTP methods, parameters, and schemas.

\n

Amazon Pinpoint is an Amazon Web Services service that you can use to engage with\n your recipients across multiple messaging channels. The AWS End User Messaging SMS and Voice, version 2 API provides programmatic access to options that are unique to the SMS\n and voice channels. AWS End User Messaging SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API.

\n

If you're new to AWS End User Messaging SMS and Voice, it's also helpful to review the \n AWS End User Messaging SMS User Guide. The AWS End User Messaging SMS User Guide\n provides tutorials, code samples, and procedures that\n demonstrate how to use AWS End User Messaging SMS and Voice features programmatically and how to integrate\n functionality into mobile apps and other types of applications.\n The guide also provides key information, such as AWS End User Messaging SMS and Voice integration with\n other Amazon Web Services services, and the quotas that apply to use of the\n service.

\n

\n Regional availability\n

\n

The AWS End User Messaging SMS and Voice version 2 API Reference is\n available in several Amazon Web Services Regions and it provides an endpoint for each of\n these Regions. For a list of all the Regions and endpoints where the API is currently\n available, see Amazon Web Services Service Endpoints and Amazon Pinpoint\n endpoints and quotas in the Amazon Web Services General Reference. To\n learn more about Amazon Web Services Regions, see Managing\n Amazon Web Services Regions in the Amazon Web Services General\n Reference.

\n

In each Region, Amazon Web Services maintains multiple Availability Zones. These\n Availability Zones are physically isolated from each other, but are united by private,\n low-latency, high-throughput, and highly redundant network connections. These\n Availability Zones enable us to provide very high levels of availability and redundancy,\n while also minimizing latency. To learn more about the number of Availability Zones that\n are available in each Region, see Amazon Web Services\n Global Infrastructure.\n

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -8110,7 +8110,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

When set to false, an end recipient sends a message that begins with HELP or STOP to\n one of your dedicated numbers, Amazon Pinpoint automatically replies with a\n customizable message and adds the end recipient to the OptOutList. When set to true\n you're responsible for responding to HELP and STOP requests. You're also responsible for\n tracking and honoring opt-out requests. For more information see Self-managed opt-outs\n

", + "smithy.api#documentation": "

When set to false, an end recipient sends a message that begins with HELP or STOP to\n one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a\n customizable message and adds the end recipient to the OptOutList. When set to true\n you're responsible for responding to HELP and STOP requests. You're also responsible for\n tracking and honoring opt-out requests. For more information see Self-managed opt-outs\n

", "smithy.api#required": {} } }, @@ -8125,7 +8125,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Allows you to enable shared routes on your pool.

\n

By default, this is set to False. If you set this value to\n True, your messages are sent using phone numbers or sender IDs\n (depending on the country) that are shared with other Amazon Pinpoint users. In some\n countries, such as the United States, senders aren't allowed to use shared routes and\n must use a dedicated phone number or short code.

", + "smithy.api#documentation": "

Allows you to enable shared routes on your pool.

\n

By default, this is set to False. If you set this value to\n True, your messages are sent using phone numbers or sender IDs\n (depending on the country) that are shared with other users. In some\n countries, such as the United States, senders aren't allowed to use shared routes and\n must use a dedicated phone number or short code.

", "smithy.api#required": {} } }, @@ -8445,7 +8445,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates a keyword configuration on an origination phone number or\n pool.

\n

A keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, Amazon Pinpoint responds with a customizable\n message.

\n

If you specify a keyword that isn't valid, an error is returned.

" + "smithy.api#documentation": "

Creates or updates a keyword configuration on an origination phone number or\n pool.

\n

A keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable\n message.

\n

If you specify a keyword that isn't valid, an error is returned.

" } }, "com.amazonaws.pinpointsmsvoicev2#PutKeywordRequest": { @@ -9913,7 +9913,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" + "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" } }, "OptOutListName": { @@ -10082,7 +10082,7 @@ } ], "traits": { - "smithy.api#documentation": "

Request an origination phone number for use in your account. For more information on\n phone number request see Requesting a\n number in the Amazon Pinpoint User Guide.

" + "smithy.api#documentation": "

Request an origination phone number for use in your account. For more information on\n phone number request see Request a phone number in the AWS End User Messaging SMS User Guide.

" } }, "com.amazonaws.pinpointsmsvoicev2#RequestPhoneNumberRequest": { @@ -10238,7 +10238,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" + "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" } }, "OptOutListName": { @@ -10900,7 +10900,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new text message and sends it to a recipient's phone number.

\n

SMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit\n depends on the destination country of your messages, as well as the type of phone number\n (origination number) that you use to send the message. For more information, see Message Parts per\n Second (MPS) limits in the Amazon Pinpoint User\n Guide.

" + "smithy.api#documentation": "

Creates a new text message and sends it to a recipient's phone number. SendTextMessage only sends an SMS message to one recipient each time it is invoked.

\n

SMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit\n depends on the destination country of your messages, as well as the type of phone number\n (origination number) that you use to send the message. For more information about MPS, see Message Parts per\n Second (MPS) limits in the AWS End User Messaging SMS User Guide.

" } }, "com.amazonaws.pinpointsmsvoicev2#SendTextMessageRequest": { @@ -10946,13 +10946,13 @@ "MaxPrice": { "target": "com.amazonaws.pinpointsmsvoicev2#MaxPrice", "traits": { - "smithy.api#documentation": "

The maximum amount that you want to spend, in US dollars, per each text message part.\n A text message can contain multiple parts.

" + "smithy.api#documentation": "

The maximum amount that you want to spend, in US dollars, per each text message. If the calculated amount to send the text message is greater than MaxPrice, the message is not sent and an error is returned.

" } }, "TimeToLive": { "target": "com.amazonaws.pinpointsmsvoicev2#TimeToLive", "traits": { - "smithy.api#documentation": "

How long the text message is valid for. By default this is 72 hours.

" + "smithy.api#documentation": "

How long the text message is valid for, in seconds. By default this is 72 hours. If the message isn't handed off before the TTL expires, we stop attempting to hand off the message and return a TTL_EXPIRED event.

" } }, "Context": { @@ -10964,14 +10964,14 @@ "DestinationCountryParameters": { "target": "com.amazonaws.pinpointsmsvoicev2#DestinationCountryParameters", "traits": { - "smithy.api#documentation": "

This field is used for any country-specific registration requirements. Currently, this\n setting is only used when you send messages to recipients in India using a sender ID.\n For more information see Special requirements for sending SMS messages to recipients in India.\n

" + "smithy.api#documentation": "

This field is used for any country-specific registration requirements. Currently, this\n setting is only used when you send messages to recipients in India using a sender ID.\n For more information see Special requirements for sending SMS messages to recipients in India.\n

\n
    \n
  • \n

    \n IN_ENTITY_ID The entity ID or Principal\n Entity (PE) ID that you received after completing the sender ID\n registration process.

    \n
  • \n
  • \n

    \n IN_TEMPLATE_ID The template ID that you\n received after completing the sender ID registration\n process.

    \n \n

    Make sure that the Template ID that you specify matches\n your message template exactly. If your message doesn't match\n the template that you provided during the registration\n process, the mobile carriers might reject your\n message.

    \n
    \n
  • \n
" } }, "DryRun": { "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

When set to true, the message is checked and validated, but isn't sent to the end\n recipient.

" + "smithy.api#documentation": "

When set to true, the message is checked and validated, but isn't sent to the end\n recipient. You are not charged for using DryRun.

\n

The Message Parts per Second (MPS) limit when using DryRun is five. If\n your origination identity has a lower MPS limit, then the lower MPS limit is used. For\n more information about MPS limits, see Message Parts per\n Second (MPS) limits in the AWS End User Messaging SMS User Guide.

" } }, "ProtectConfigurationId": { @@ -11031,7 +11031,7 @@ } ], "traits": { - "smithy.api#documentation": "

Allows you to send a request that sends a voice message through Amazon Pinpoint.\n This operation uses Amazon Polly to\n convert a text script into a voice message.

" + "smithy.api#documentation": "

Allows you to send a request that sends a voice message.\n This operation uses Amazon Polly to\n convert a text script into a voice message.

" } }, "com.amazonaws.pinpointsmsvoicev2#SendVoiceMessageRequest": { @@ -11154,7 +11154,7 @@ } }, "traits": { - "smithy.api#documentation": "

The alphanumeric sender ID in a specific country that you want to describe. For more\n information on sender IDs see Requesting\n sender IDs for SMS messaging with Amazon Pinpoint\n in the Amazon Pinpoint User Guide.

" + "smithy.api#documentation": "

The alphanumeric sender ID in a specific country that you want to describe. For more\n information on sender IDs see Requesting\n sender IDs in the AWS End User Messaging SMS User Guide.

" } }, "com.amazonaws.pinpointsmsvoicev2#SenderIdFilter": { @@ -11872,7 +11872,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the current Amazon Pinpoint monthly spend limits for sending voice and\n text messages. For more information on increasing your monthly spend limit, see \n Requesting increases to your monthly SMS spending quota for Amazon Pinpoint\n in the Amazon Pinpoint User Guide.

" + "smithy.api#documentation": "

Describes the current monthly spend limits for sending voice and\n text messages. For more information on increasing your monthly spend limit, see \n Requesting a spending quota increase\n in the AWS End User Messaging SMS User Guide.

" } }, "com.amazonaws.pinpointsmsvoicev2#SpendLimitList": { @@ -12130,7 +12130,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds or overwrites only the specified tags for the specified Amazon Pinpoint SMS\n Voice, version 2 resource. When you specify an existing tag key, the value is\n overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag\n consists of a key and an optional value. Tag keys must be unique per resource. For more\n information about tags, see Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer\n Guide.

" + "smithy.api#documentation": "

Adds or overwrites only the specified tags for the specified resource. When you specify an existing tag key, the value is\n overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag\n consists of a key and an optional value. Tag keys must be unique per resource. For more\n information about tags, see Tags in the AWS End User Messaging SMS User Guide.

" } }, "com.amazonaws.pinpointsmsvoicev2#TagResourceRequest": { @@ -12289,7 +12289,7 @@ } ], "traits": { - "smithy.api#documentation": "

Removes the association of the specified tags from an Amazon Pinpoint SMS Voice V2\n resource. For more information on tags see Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer\n Guide.

" + "smithy.api#documentation": "

Removes the association of the specified tags from a\n resource. For more information on tags see Tags in the AWS End User Messaging SMS User Guide.

" } }, "com.amazonaws.pinpointsmsvoicev2#UntagResourceRequest": { @@ -12350,7 +12350,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates an existing event destination in a configuration set. You can update the\n IAM role ARN for CloudWatch Logs and Kinesis Data Firehose. You can\n also enable or disable the event destination.

\n

You may want to update an event destination to change its matching event types or\n updating the destination resource ARN. You can't change an event destination's type\n between CloudWatch Logs, Kinesis Data Firehose, and Amazon SNS.

" + "smithy.api#documentation": "

Updates an existing event destination in a configuration set. You can update the\n IAM role ARN for CloudWatch Logs and Firehose. You can\n also enable or disable the event destination.

\n

You may want to update an event destination to change its matching event types or\n updating the destination resource ARN. You can't change an event destination's type\n between CloudWatch Logs, Firehose, and Amazon SNS.

" } }, "com.amazonaws.pinpointsmsvoicev2#UpdateEventDestinationRequest": { @@ -12391,7 +12391,7 @@ "KinesisFirehoseDestination": { "target": "com.amazonaws.pinpointsmsvoicev2#KinesisFirehoseDestination", "traits": { - "smithy.api#documentation": "

An object that contains information about an event destination for logging to Kinesis Data Firehose.

" + "smithy.api#documentation": "

An object that contains information about an event destination for logging to Firehose.

" } }, "SnsDestination": { @@ -12494,7 +12494,7 @@ "SelfManagedOptOutsEnabled": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" + "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" } }, "OptOutListName": { @@ -12690,7 +12690,7 @@ "SelfManagedOptOutsEnabled": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" + "smithy.api#documentation": "

By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.

" } }, "OptOutListName": { @@ -12766,7 +12766,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

When an end recipient sends a message that begins with HELP or STOP to one of your\n dedicated numbers, Amazon Pinpoint automatically replies with a customizable message\n and adds the end recipient to the OptOutList. When set to true you're responsible for\n responding to HELP and STOP requests. You're also responsible for tracking and honoring\n opt-out requests.

" + "smithy.api#documentation": "

When an end recipient sends a message that begins with HELP or STOP to one of your\n dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message\n and adds the end recipient to the OptOutList. When set to true you're responsible for\n responding to HELP and STOP requests. You're also responsible for tracking and honoring\n opt-out requests.

" } }, "OptOutListName": { @@ -12878,7 +12878,7 @@ "CountryRuleSetUpdates": { "target": "com.amazonaws.pinpointsmsvoicev2#ProtectConfigurationCountryRuleSet", "traits": { - "smithy.api#documentation": "

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the\n details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide.

", + "smithy.api#documentation": "

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the\n details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide.

", "smithy.api#required": {} } } diff --git a/models/quicksight.json b/models/quicksight.json index f169298e86..acb0b1f6ab 100644 --- a/models/quicksight.json +++ b/models/quicksight.json @@ -8237,6 +8237,9 @@ { "target": "com.amazonaws.quicksight#ConflictException" }, + { + "target": "com.amazonaws.quicksight#CustomerManagedKeyUnavailableException" + }, { "target": "com.amazonaws.quicksight#InternalFailureException" }, @@ -10995,6 +10998,25 @@ "smithy.api#documentation": "

The configuration of custom values for the destination parameter in DestinationParameterValueConfiguration.

" } }, + "com.amazonaws.quicksight#CustomerManagedKeyUnavailableException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.quicksight#String" + }, + "RequestId": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services request ID for this operation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The customer managed key that is registered to your Amazon QuickSight account is unavailable.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.quicksight#Dashboard": { "type": "structure", "members": { @@ -52022,6 +52044,9 @@ { "target": "com.amazonaws.quicksight#ConflictException" }, + { + "target": "com.amazonaws.quicksight#CustomerManagedKeyUnavailableException" + }, { "target": "com.amazonaws.quicksight#InternalFailureException" }, diff --git a/models/rds.json b/models/rds.json index 902fc849c7..15ce53cad1 100644 --- a/models/rds.json +++ b/models/rds.json @@ -6134,6 +6134,12 @@ "smithy.api#required": {} } }, + "MinACU": { + "target": "com.amazonaws.rds#DoubleOptional", + "traits": { + "smithy.api#documentation": "

The minimum capacity of the DB shard group in Aurora capacity units (ACUs).

" + } + }, "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { @@ -11096,6 +11102,12 @@ "smithy.api#documentation": "

The maximum capacity of the DB shard group in Aurora capacity units (ACUs).

" } }, + "MinACU": { + "target": "com.amazonaws.rds#DoubleOptional", + "traits": { + "smithy.api#documentation": "

The minimum capacity of the DB shard group in Aurora capacity units (ACUs).

" + } + }, "ComputeRedundancy": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { @@ -12137,19 +12149,19 @@ "SkipFinalSnapshot": { "target": "com.amazonaws.rds#Boolean", "traits": { - "smithy.api#documentation": "

Specifies whether to skip the creation of a final DB cluster snapshot before the DB cluster is deleted.\n If skip is specified, no DB cluster snapshot is created. If skip isn't specified, a DB cluster snapshot \n is created before the DB cluster is deleted. By default, skip isn't specified, and the DB cluster snapshot is created. \n By default, this parameter is disabled.

\n \n

You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is disabled.

\n
" + "smithy.api#documentation": "

Specifies whether to skip the creation of a final DB cluster snapshot before RDS\n deletes the DB cluster. If you set this value to true, RDS doesn't create a\n final DB cluster snapshot. If you set this value to false or don't specify\n it, RDS creates a DB cluster snapshot before it deletes the DB cluster. By default, this\n parameter is disabled, so RDS creates a final DB cluster snapshot.

\n \n

If SkipFinalSnapshot is disabled, you must specify a value for the\n FinalDBSnapshotIdentifier parameter.

\n
" } }, "FinalDBSnapshotIdentifier": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot\n is disabled.

\n \n

Specifying this parameter and also skipping the creation of a final DB cluster snapshot \n with the SkipFinalShapshot parameter results in an error.

\n
\n

Constraints:

\n
    \n
  • \n

    Must be 1 to 255 letters, numbers, or hyphens.

    \n
  • \n
  • \n

    First character must be a letter

    \n
  • \n
  • \n

    Can't end with a hyphen or contain two consecutive hyphens

    \n
  • \n
" + "smithy.api#documentation": "

The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot\n is disabled.

\n \n

If you specify this parameter and also skip the creation of a final DB cluster\n snapshot with the SkipFinalShapshot parameter, the request results in\n an error.

\n
\n

Constraints:

\n
    \n
  • \n

    Must be 1 to 255 letters, numbers, or hyphens.

    \n
  • \n
  • \n

    First character must be a letter

    \n
  • \n
  • \n

    Can't end with a hyphen or contain two consecutive hyphens

    \n
  • \n
" } }, "DeleteAutomatedBackups": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether to remove automated backups immediately after the DB\n cluster is deleted. This parameter isn't case-sensitive. The default is to remove \n automated backups immediately after the DB cluster is deleted.\n

\n \n

You must delete automated backups for Amazon RDS Multi-AZ DB clusters. For more information about managing automated backups for RDS Multi-AZ DB clusters, see Managing automated backups.

\n
" + "smithy.api#documentation": "

Specifies whether to remove automated backups immediately after the DB\n cluster is deleted. This parameter isn't case-sensitive. The default is to remove \n automated backups immediately after the DB cluster is deleted.

" } } }, @@ -13909,7 +13921,7 @@ "Source": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

A specific source to return parameters for.

\n

Valid Values:

\n
    \n
  • \n

    \n user\n

    \n
  • \n
  • \n

    \n engine\n

    \n
  • \n
  • \n

    \n service\n

    \n
  • \n
" + "smithy.api#documentation": "

A specific source to return parameters for.

\n

Valid Values:

\n
    \n
  • \n

    \n customer\n

    \n
  • \n
  • \n

    \n engine\n

    \n
  • \n
  • \n

    \n service\n

    \n
  • \n
" } }, "Filters": { @@ -22882,6 +22894,12 @@ "traits": { "smithy.api#documentation": "

The maximum capacity of the DB shard group in Aurora capacity units (ACUs).

" } + }, + "MinACU": { + "target": "com.amazonaws.rds#DoubleOptional", + "traits": { + "smithy.api#documentation": "

The minimum capacity of the DB shard group in Aurora capacity units (ACUs).

" + } } }, "traits": { @@ -23780,13 +23798,13 @@ "DBSecurityGroupMemberships": { "target": "com.amazonaws.rds#DBSecurityGroupNameList", "traits": { - "smithy.api#documentation": "

A list of DBSecurityGroupMembership name strings used for this option.

" + "smithy.api#documentation": "

A list of DB security groups used for this option.

" } }, "VpcSecurityGroupMemberships": { "target": "com.amazonaws.rds#VpcSecurityGroupIdList", "traits": { - "smithy.api#documentation": "

A list of VpcSecurityGroupMembership name strings used for this option.

" + "smithy.api#documentation": "

A list of VPC security group names used for this option.

" } }, "OptionSettings": { @@ -23797,7 +23815,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list of all available options

" + "smithy.api#documentation": "

A list of all available options for an option group.

" } }, "com.amazonaws.rds#OptionConfigurationList": { @@ -30782,7 +30800,7 @@ } }, "traits": { - "smithy.api#documentation": "

Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

\n

For more information, see\n Tagging Amazon RDS Resources in the Amazon RDS User Guide\n or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide.\n

" + "smithy.api#documentation": "

Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

\n

For more information, see\n Tagging Amazon RDS resources in the Amazon RDS User Guide or \n Tagging Amazon Aurora and Amazon RDS resources in the Amazon Aurora User Guide.\n

" } }, "com.amazonaws.rds#TagList": { @@ -30794,7 +30812,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list of tags.\n For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.\n

" + "smithy.api#documentation": "

A list of tags.

\n

For more information, see\n Tagging Amazon RDS resources in the Amazon RDS User Guide or \n Tagging Amazon Aurora and Amazon RDS resources in the Amazon Aurora User Guide.\n

" } }, "com.amazonaws.rds#TagListMessage": { diff --git a/models/redshift-serverless.json b/models/redshift-serverless.json index bf66f50b8c..3cdf696223 100644 --- a/models/redshift-serverless.json +++ b/models/redshift-serverless.json @@ -550,7 +550,7 @@ "roleArn": { "target": "com.amazonaws.redshiftserverless#IamRoleArn", "traits": { - "smithy.api#documentation": "

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Cluster Management Guide

", + "smithy.api#documentation": "

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Management Guide

", "smithy.api#required": {} } }, @@ -904,6 +904,9 @@ { "target": "com.amazonaws.redshiftserverless#InternalServerException" }, + { + "target": "com.amazonaws.redshiftserverless#Ipv6CidrBlockNotFoundException" + }, { "target": "com.amazonaws.redshiftserverless#ResourceNotFoundException" }, @@ -989,6 +992,12 @@ "traits": { "smithy.api#documentation": "

The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.

" } + }, + "ipAddressType": { + "target": "com.amazonaws.redshiftserverless#IpAddressType", + "traits": { + "smithy.api#documentation": "

The IP address type that the workgroup supports. Possible values are ipv4 and dualstack.

" + } } }, "traits": { @@ -2407,6 +2416,28 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.redshiftserverless#IpAddressType": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(ipv4|dualstack)$" + } + }, + "com.amazonaws.redshiftserverless#Ipv6CidrBlockNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

There are no subnets in your VPC with associated IPv6 CIDR blocks. To use dual-stack mode, associate an IPv6 CIDR block with each subnet in your VPC.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.redshiftserverless#KmsKeyId": { "type": "string" }, @@ -3589,6 +3620,12 @@ "traits": { "smithy.api#documentation": "

The availability Zone.

" } + }, + "ipv6Address": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The IPv6 address of the network interface within the subnet.

" + } } }, "traits": { @@ -5166,7 +5203,7 @@ "roleArn": { "target": "com.amazonaws.redshiftserverless#IamRoleArn", "traits": { - "smithy.api#documentation": "

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Cluster Management Guide

" + "smithy.api#documentation": "

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Management Guide

" } }, "state": { @@ -6195,7 +6232,7 @@ "roleArn": { "target": "com.amazonaws.redshiftserverless#IamRoleArn", "traits": { - "smithy.api#documentation": "

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Cluster Management Guide

" + "smithy.api#documentation": "

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Management Guide

" } }, "enabled": { @@ -6452,6 +6489,9 @@ { "target": "com.amazonaws.redshiftserverless#InternalServerException" }, + { + "target": "com.amazonaws.redshiftserverless#Ipv6CidrBlockNotFoundException" + }, { "target": "com.amazonaws.redshiftserverless#ResourceNotFoundException" }, @@ -6520,6 +6560,12 @@ "traits": { "smithy.api#documentation": "

The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.

" } + }, + "ipAddressType": { + "target": "com.amazonaws.redshiftserverless#IpAddressType", + "traits": { + "smithy.api#documentation": "

The IP address type that the workgroup supports. Possible values are ipv4 and dualstack.

" + } } }, "traits": { @@ -6895,6 +6941,12 @@ "traits": { "smithy.api#documentation": "

A list of VPCs. Each entry is the unique identifier of a virtual private cloud with access to Amazon Redshift Serverless. If all of the VPCs for the grantee are allowed, it shows an asterisk.

" } + }, + "ipAddressType": { + "target": "com.amazonaws.redshiftserverless#IpAddressType", + "traits": { + "smithy.api#documentation": "

The IP address type that the workgroup supports. Possible values are ipv4 and dualstack.

" + } } }, "traits": { diff --git a/models/resiliencehub.json b/models/resiliencehub.json index f02d0058b0..fd69f005fa 100644 --- a/models/resiliencehub.json +++ b/models/resiliencehub.json @@ -1,6 +1,111 @@ { "smithy": "2.0", "shapes": { + "com.amazonaws.resiliencehub#AcceptGroupingRecommendationEntries": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#AcceptGroupingRecommendationEntry" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 30 + } + } + }, + "com.amazonaws.resiliencehub#AcceptGroupingRecommendationEntry": { + "type": "structure", + "members": { + "groupingRecommendationId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of the grouping recommendation.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates the grouping recommendation you have accepted to include in your application.

" + } + }, + "com.amazonaws.resiliencehub#AcceptResourceGroupingRecommendations": { + "type": "operation", + "input": { + "target": "com.amazonaws.resiliencehub#AcceptResourceGroupingRecommendationsRequest" + }, + "output": { + "target": "com.amazonaws.resiliencehub#AcceptResourceGroupingRecommendationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.resiliencehub#AccessDeniedException" + }, + { + "target": "com.amazonaws.resiliencehub#InternalServerException" + }, + { + "target": "com.amazonaws.resiliencehub#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.resiliencehub#ThrottlingException" + }, + { + "target": "com.amazonaws.resiliencehub#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Accepts the resource grouping recommendations suggested by Resilience Hub for your application.

", + "smithy.api#http": { + "method": "POST", + "uri": "/accept-resource-grouping-recommendations", + "code": 200 + } + } + }, + "com.amazonaws.resiliencehub#AcceptResourceGroupingRecommendationsRequest": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

", + "smithy.api#required": {} + } + }, + "entries": { + "target": "com.amazonaws.resiliencehub#AcceptGroupingRecommendationEntries", + "traits": { + "smithy.api#documentation": "

Indicates the list of resource grouping recommendations you want to include in your application.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resiliencehub#AcceptResourceGroupingRecommendationsResponse": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

", + "smithy.api#required": {} + } + }, + "failedEntries": { + "target": "com.amazonaws.resiliencehub#FailedGroupingRecommendationEntries", + "traits": { + "smithy.api#documentation": "

Indicates the list of resource grouping recommendations that could not be included in your application.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resiliencehub#AccessDeniedException": { "type": "structure", "members": { @@ -35,6 +140,9 @@ { "target": "com.amazonaws.resiliencehub#ResourceNotFoundException" }, + { + "target": "com.amazonaws.resiliencehub#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.resiliencehub#ThrottlingException" }, @@ -43,7 +151,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds the source of resource-maps to the draft version of an application. During assessment, Resilience Hub will use these resource-maps to resolve the latest physical ID for each resource in the application template. For more information about different types of resources suported by Resilience Hub and how to add them in your application, see Step 2: How is your application managed? in the Resilience Hub User Guide.

", + "smithy.api#documentation": "

Adds the source of resource-maps to the draft version of an application. During\n assessment, Resilience Hub will use these resource-maps to resolve the latest physical\n ID for each resource in the application template. For more information about different types\n of resources supported by Resilience Hub and how to add them in your application, see\n Step\n 2: How is your application managed? in the Resilience Hub User Guide.

", "smithy.api#http": { "method": "POST", "uri": "/add-draft-app-version-resource-mappings", @@ -470,6 +578,12 @@ "traits": { "smithy.api#documentation": "

Indicates if compliance drifts (deviations) were detected while running an assessment for\n your application.

" } + }, + "summary": { + "target": "com.amazonaws.resiliencehub#AssessmentSummary", + "traits": { + "smithy.api#documentation": "

Indicates a concise summary that provides an overview of the Resilience Hub assessment.

" + } } }, "traits": { @@ -553,7 +667,7 @@ "complianceStatus": { "target": "com.amazonaws.resiliencehub#ComplianceStatus", "traits": { - "smithy.api#documentation": "

TCurrent status of compliance for the resiliency policy.

" + "smithy.api#documentation": "

Current\n status of compliance for the resiliency policy.

" } }, "cost": { @@ -611,6 +725,14 @@ { "value": "ChangesDetected", "name": "CHANGES_DETECTED" + }, + { + "value": "NotApplicable", + "name": "NOT_APPLICABLE" + }, + { + "value": "MissingPolicy", + "name": "MISSING_POLICY" } ] } @@ -619,7 +741,7 @@ "type": "structure", "members": { "name": { - "target": "com.amazonaws.resiliencehub#String255", + "target": "com.amazonaws.resiliencehub#EntityName255", "traits": { "smithy.api#documentation": "

Name of the Application Component.

", "smithy.api#required": {} @@ -633,7 +755,7 @@ } }, "id": { - "target": "com.amazonaws.resiliencehub#String255", + "target": "com.amazonaws.resiliencehub#EntityName255", "traits": { "smithy.api#documentation": "

Identifier of the Application Component.

" } @@ -966,6 +1088,38 @@ ] } }, + "com.amazonaws.resiliencehub#AssessmentRiskRecommendation": { + "type": "structure", + "members": { + "risk": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the description of the potential risk identified in the application as part of the Resilience Hub assessment.

\n \n

This property is available only in the US East (N. Virginia) Region.

\n
" + } + }, + "recommendation": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the recommendation provided by the Resilience Hub to address the identified\n risks in the application.

\n \n

This property is available only in the US East (N. Virginia) Region.

\n
" + } + }, + "appComponents": { + "target": "com.amazonaws.resiliencehub#AppComponentNameList", + "traits": { + "smithy.api#documentation": "

Indicates the Application Components (AppComponents) that were assessed as part of the\n assessment and are associated with the identified risk and recommendation.

\n \n

This property is available only in the US East (N. Virginia) Region.

\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates a specific risk identified in the Resilience Hub assessment and the corresponding recommendation provided to address that risk.

\n \n

The assessment summary generated by large language models (LLMs) on Amazon Bedrock are only suggestions. \n The current level of generative AI technology is not perfect and LLMs are not infallible. \n Bias and incorrect answers, although rare, should be expected. Review each recommendation in the assessment summary before you use the output from an LLM.\n

\n
\n \n

This property is available only in the US East (N. Virginia) Region.

\n
" + } + }, + "com.amazonaws.resiliencehub#AssessmentRiskRecommendationList": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#AssessmentRiskRecommendation" + } + }, "com.amazonaws.resiliencehub#AssessmentStatus": { "type": "string", "traits": { @@ -1001,6 +1155,26 @@ } } }, + "com.amazonaws.resiliencehub#AssessmentSummary": { + "type": "structure", + "members": { + "summary": { + "target": "com.amazonaws.resiliencehub#String500", + "traits": { + "smithy.api#documentation": "

Indicates a concise summary that provides an overview of the Resilience Hub assessment.

\n \n

This property is available only in the US East (N. Virginia) Region.

\n
" + } + }, + "riskRecommendations": { + "target": "com.amazonaws.resiliencehub#AssessmentRiskRecommendationList", + "traits": { + "smithy.api#documentation": "

Indicates the top risks and recommendations identified by the Resilience Hub assessment, \n each representing a specific risk and the corresponding recommendation to address it.

\n \n

This property is available only in the US East (N. Virginia) Region.

\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates the AI-generated summary for the Resilience Hub assessment, providing a concise overview that highlights the top risks and recommendations.

\n \n

This property is available only in the US East (N. Virginia) Region.

\n
" + } + }, "com.amazonaws.resiliencehub#AwsRegion": { "type": "string", "traits": { @@ -1011,6 +1185,9 @@ "type": "service", "version": "2020-04-30", "operations": [ + { + "target": "com.amazonaws.resiliencehub#AcceptResourceGroupingRecommendations" + }, { "target": "com.amazonaws.resiliencehub#AddDraftAppVersionResourceMappings" }, @@ -1080,6 +1257,9 @@ { "target": "com.amazonaws.resiliencehub#DescribeResiliencyPolicy" }, + { + "target": "com.amazonaws.resiliencehub#DescribeResourceGroupingRecommendationTask" + }, { "target": "com.amazonaws.resiliencehub#ImportResourcesToDraftAppVersion" }, @@ -1125,6 +1305,9 @@ { "target": "com.amazonaws.resiliencehub#ListResiliencyPolicies" }, + { + "target": "com.amazonaws.resiliencehub#ListResourceGroupingRecommendations" + }, { "target": "com.amazonaws.resiliencehub#ListSopRecommendations" }, @@ -1146,6 +1329,9 @@ { "target": "com.amazonaws.resiliencehub#PutDraftAppVersionTemplate" }, + { + "target": "com.amazonaws.resiliencehub#RejectResourceGroupingRecommendations" + }, { "target": "com.amazonaws.resiliencehub#RemoveDraftAppVersionResourceMappings" }, @@ -1155,6 +1341,9 @@ { "target": "com.amazonaws.resiliencehub#StartAppAssessment" }, + { + "target": "com.amazonaws.resiliencehub#StartResourceGroupingRecommendationTask" + }, { "target": "com.amazonaws.resiliencehub#TagResource" }, @@ -2147,7 +2336,7 @@ "requestEntries": { "target": "com.amazonaws.resiliencehub#UpdateRecommendationStatusRequestEntries", "traits": { - "smithy.api#documentation": "

Defines the list of operational recommendations that need to be included or excluded.

", + "smithy.api#documentation": "

Defines the list of operational recommendations that need to be included or\n excluded.

", "smithy.api#required": {} } } @@ -2176,7 +2365,7 @@ "failedEntries": { "target": "com.amazonaws.resiliencehub#BatchUpdateRecommendationStatusFailedEntries", "traits": { - "smithy.api#documentation": "

A list of items with error details about each item, which could not be included or excluded.

", + "smithy.api#documentation": "

A list of items with error details about each item, which could not be included or\n excluded.

", "smithy.api#required": {} } } @@ -2306,7 +2495,7 @@ "diffType": { "target": "com.amazonaws.resiliencehub#DifferenceType", "traits": { - "smithy.api#documentation": "

Difference type between actual and expected recovery point objective (RPO) and recovery\n time objective (RTO) values. Currently, Resilience Hub supports only NotEqual difference type.

" + "smithy.api#documentation": "

Difference type between actual and expected recovery point objective (RPO) and recovery\n time objective (RTO) values. Currently, Resilience Hub supports only\n NotEqual difference type.

" } } }, @@ -2331,6 +2520,14 @@ { "value": "PolicyMet", "name": "POLICY_MET" + }, + { + "value": "NotApplicable", + "name": "NOT_APPLICABLE" + }, + { + "value": "MissingPolicy", + "name": "MISSING_POLICY" } ] } @@ -2594,7 +2791,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Resilience Hub application. An Resilience Hub application is a\n collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe a Resilience Hub application,\n you provide an application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate\n resiliency policy. In addition, you can also add resources that are located on Amazon Elastic Kubernetes Service (Amazon EKS) clusters as optional resources. For more information about the number of resources supported per application, see Service\n quotas.

\n

After you create an Resilience Hub application, you publish it so that you can run a resiliency\n assessment on it. You can then use recommendations from the assessment to improve resiliency\n by running another assessment, comparing results, and then iterating the process until you\n achieve your goals for recovery time objective (RTO) and recovery point objective\n (RPO).

", + "smithy.api#documentation": "

Creates an Resilience Hub application. An Resilience Hub application is a\n collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe a Resilience Hub application, you provide an\n application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate\n resiliency policy. In addition, you can also add resources that are located on Amazon Elastic Kubernetes Service (Amazon EKS) clusters as optional resources. For more information\n about the number of resources supported per application, see Service\n quotas.

\n

After you create an Resilience Hub application, you publish it so that you can run\n a resiliency assessment on it. You can then use recommendations from the assessment to improve\n resiliency by running another assessment, comparing results, and then iterating the process\n until you achieve your goals for recovery time objective (RTO) and recovery point objective\n (RPO).

", "smithy.api#http": { "method": "POST", "uri": "/create-app", @@ -2640,7 +2837,7 @@ "assessmentSchedule": { "target": "com.amazonaws.resiliencehub#AppAssessmentScheduleType", "traits": { - "smithy.api#documentation": "

\n Assessment execution schedule with 'Daily' or 'Disabled' values.\n

" + "smithy.api#documentation": "

Assessment execution schedule with 'Daily' or 'Disabled' values.

" } }, "permissionModel": { @@ -2652,7 +2849,7 @@ "eventSubscriptions": { "target": "com.amazonaws.resiliencehub#EventSubscriptionList", "traits": { - "smithy.api#documentation": "

The list of events you would like to subscribe and get notification for. Currently, Resilience Hub supports only Drift detected and Scheduled assessment failure events notification.

" + "smithy.api#documentation": "

The list of events you would like to subscribe and get notification for. Currently,\n Resilience Hub supports only Drift detected and\n Scheduled assessment failure events notification.

" } } } @@ -2701,7 +2898,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new Application Component in the Resilience Hub application.

\n \n

This API updates the Resilience Hub application draft version. To use this Application Component for running assessments, you must publish the Resilience Hub application using the PublishAppVersion API.

\n
", + "smithy.api#documentation": "

Creates a new Application Component in the Resilience Hub application.

\n \n

This API updates the Resilience Hub application draft version. To use this\n Application Component for running assessments, you must publish the Resilience Hub\n application using the PublishAppVersion API.

\n
", "smithy.api#http": { "method": "POST", "uri": "/create-app-version-app-component", @@ -2811,7 +3008,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds a resource to the Resilience Hub application and assigns it to the specified\n Application Components. If you specify a new Application Component, Resilience Hub will automatically\n create the Application Component.

\n \n
    \n
  • \n

    This action has no effect outside Resilience Hub.

    \n
  • \n
  • \n

    This API updates the Resilience Hub application draft version. To use this resource\n for running resiliency assessments, you must publish the Resilience Hub application using\n the PublishAppVersion API.

    \n
  • \n
  • \n

    To update application version with new physicalResourceID, you must\n call ResolveAppVersionResources API.

    \n
  • \n
\n
", + "smithy.api#documentation": "

Adds a resource to the Resilience Hub application and assigns it to the specified\n Application Components. If you specify a new Application Component, Resilience Hub will\n automatically create the Application Component.

\n \n
    \n
  • \n

    This action has no effect outside Resilience Hub.

    \n
  • \n
  • \n

    This API updates the Resilience Hub application draft version. To use this\n resource for running resiliency assessments, you must publish the Resilience Hub\n application using the PublishAppVersion API.

    \n
  • \n
  • \n

    To update application version with new physicalResourceID, you must\n call ResolveAppVersionResources API.

    \n
  • \n
\n
", "smithy.api#http": { "method": "POST", "uri": "/create-app-version-resource", @@ -3051,7 +3248,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a resiliency policy for an application.

\n \n

Resilience Hub allows you to provide a value of zero for rtoInSecs and\n rpoInSecs of your resiliency policy. But, while assessing your application, the lowest possible assessment result is near zero. Hence, if you provide value\n zero for rtoInSecs and rpoInSecs, the estimated workload RTO and estimated workload RPO result will be near zero and the Compliance\n status for your application will be set to Policy\n breached.

\n
", + "smithy.api#documentation": "

Creates a resiliency policy for an application.

\n \n

Resilience Hub allows you to provide a value of zero for rtoInSecs\n and rpoInSecs of your resiliency policy. But, while assessing your application,\n the lowest possible assessment result is near zero. Hence, if you provide value zero for\n rtoInSecs and rpoInSecs, the estimated workload RTO and\n estimated workload RPO result will be near zero and the Compliance\n status for your application will be set to Policy\n breached.

\n
", "smithy.api#http": { "method": "POST", "uri": "/create-resiliency-policy", @@ -3219,7 +3416,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an Resilience Hub application assessment. This is a destructive action that can't\n be undone.

", + "smithy.api#documentation": "

Deletes an Resilience Hub application assessment. This is a destructive action\n that can't be undone.

", "smithy.api#http": { "method": "POST", "uri": "/delete-app-assessment", @@ -3294,7 +3491,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the input source and all of its imported resources from the Resilience Hub application.

", + "smithy.api#documentation": "

Deletes the input source and all of its imported resources from the Resilience Hub\n application.

", "smithy.api#http": { "method": "POST", "uri": "/delete-app-input-source", @@ -3315,13 +3512,13 @@ "sourceArn": { "target": "com.amazonaws.resiliencehub#Arn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the imported resource you want to remove from the\n Resilience Hub application. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the imported resource you want to remove from the\n Resilience Hub application. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

" } }, "terraformSource": { "target": "com.amazonaws.resiliencehub#TerraformSource", "traits": { - "smithy.api#documentation": "

The imported Terraform s3 state file you want to remove from the Resilience Hub application.

" + "smithy.api#documentation": "

The imported Terraform s3 state file you want to remove from the Resilience Hub\n application.

" } }, "clientToken": { @@ -3334,7 +3531,7 @@ "eksSourceClusterNamespace": { "target": "com.amazonaws.resiliencehub#EksSourceClusterNamespace", "traits": { - "smithy.api#documentation": "

The namespace on your Amazon Elastic Kubernetes Service cluster that you want to delete from the Resilience Hub application.

" + "smithy.api#documentation": "

The namespace on your Amazon Elastic Kubernetes Service cluster that you want to delete from the\n Resilience Hub application.

" } } } @@ -3422,7 +3619,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an Application Component from the Resilience Hub application.

\n \n
    \n
  • \n

    This API updates the Resilience Hub application draft version. To use this Application Component for running assessments, you must publish the Resilience Hub application using the PublishAppVersion API.

    \n
  • \n
  • \n

    You will not be able to delete an Application Component if it has resources associated with it.

    \n
  • \n
\n
", + "smithy.api#documentation": "

Deletes an Application Component from the Resilience Hub application.

\n \n
    \n
  • \n

    This API updates the Resilience Hub application draft version. To use this\n Application Component for running assessments, you must publish the Resilience Hub\n application using the PublishAppVersion API.

    \n
  • \n
  • \n

    You will not be able to delete an Application Component if it has resources associated\n with it.

    \n
  • \n
\n
", "smithy.api#http": { "method": "POST", "uri": "/delete-app-version-app-component", @@ -3510,7 +3707,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a resource from the Resilience Hub application.

\n \n
    \n
  • \n

    You can only delete a manually added resource. To exclude non-manually added resources, use the UpdateAppVersionResource API.

    \n
  • \n
  • \n

    This action has no effect outside Resilience Hub.

    \n
  • \n
  • \n

    This API updates the Resilience Hub application draft version. To use this resource for running resiliency assessments, you must publish the Resilience Hub application using the PublishAppVersion API.

    \n
  • \n
\n
", + "smithy.api#documentation": "

Deletes a resource from the Resilience Hub application.

\n \n
    \n
  • \n

    You can only delete a manually added resource. To exclude non-manually added\n resources, use the UpdateAppVersionResource API.

    \n
  • \n
  • \n

    This action has no effect outside Resilience Hub.

    \n
  • \n
  • \n

    This API updates the Resilience Hub application draft version. To use this\n resource for running resiliency assessments, you must publish the Resilience Hub\n application using the PublishAppVersion API.

    \n
  • \n
\n
", "smithy.api#http": { "method": "POST", "uri": "/delete-app-version-resource", @@ -3818,7 +4015,7 @@ "assessment": { "target": "com.amazonaws.resiliencehub#AppAssessment", "traits": { - "smithy.api#documentation": "

The assessment for an Resilience Hub application, returned as an object. This object\n includes Amazon Resource Names (ARNs), compliance information, compliance status, cost,\n messages, resiliency scores, and more.

", + "smithy.api#documentation": "

The assessment for an Resilience Hub application, returned as an object. This\n object includes Amazon Resource Names (ARNs), compliance information, compliance status, cost,\n messages, resiliency scores, and more.

", "smithy.api#required": {} } } @@ -4018,7 +4215,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a resource of the Resilience Hub application.

\n \n

This API accepts only one of the following parameters to descibe the resource:

\n
    \n
  • \n

    \n resourceName\n

    \n
  • \n
  • \n

    \n logicalResourceId\n

    \n
  • \n
  • \n

    \n physicalResourceId (Along with physicalResourceId, you can also\n provide awsAccountId, and awsRegion)

    \n
  • \n
\n
", + "smithy.api#documentation": "

Describes a resource of the Resilience Hub application.

\n \n

This API accepts only one of the following parameters to describe the resource:

\n
    \n
  • \n

    \n resourceName\n

    \n
  • \n
  • \n

    \n logicalResourceId\n

    \n
  • \n
  • \n

    \n physicalResourceId (Along with physicalResourceId, you can\n also provide awsAccountId, and awsRegion)

    \n
  • \n
\n
", "smithy.api#http": { "method": "POST", "uri": "/describe-app-version-resource", @@ -4296,7 +4493,7 @@ "appTemplateBody": { "target": "com.amazonaws.resiliencehub#AppTemplateBody", "traits": { - "smithy.api#documentation": "

A JSON string that provides information about your application structure. To learn more\n about the appTemplateBody template, see the sample template provided in the\n Examples section.

\n

The appTemplateBody JSON string has the following structure:

\n
    \n
  • \n

    \n \n resources\n \n

    \n

    The list of logical resources that must be included in the Resilience Hub\n application.

    \n

    Type: Array

    \n \n

    Don't add the resources that you want to exclude.

    \n
    \n

    Each resources array item includes the following fields:

    \n
      \n
    • \n

      \n \n logicalResourceId\n \n

      \n

      Logical identifier of the resource.

      \n

      Type: Object

      \n

      Each logicalResourceId object includes the following fields:

      \n
        \n
      • \n

        \n identifier\n

        \n

        Identifier of the resource.

        \n

        Type: String

        \n
      • \n
      • \n

        \n logicalStackName\n

        \n

        The name of the CloudFormation stack this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n resourceGroupName\n

        \n

        The name of the resource group this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n terraformSourceName\n

        \n

        The name of the Terraform S3 state file this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n eksSourceName\n

        \n

        Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

        \n \n

        This parameter accepts values in \"eks-cluster/namespace\" format.

        \n
        \n

        Type: String

        \n
      • \n
      \n
    • \n
    • \n

      \n \n type\n \n

      \n

      The type of resource.

      \n

      Type: string

      \n
    • \n
    • \n

      \n \n name\n \n

      \n

      The name of the resource.

      \n

      Type: String

      \n
    • \n
    • \n

      \n additionalInfo\n

      \n

      Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

      \n \n

      Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

      \n

      Key: \"failover-regions\"\n

      \n

      Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"\n

      \n
      \n
    • \n
    \n
  • \n
  • \n

    \n \n appComponents\n \n

    \n

    List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.

    \n

    Type: Array

    \n

    Each appComponents array item includes the following fields:

    \n
      \n
    • \n

      \n name\n

      \n

      Name of the Application Component.

      \n

      Type: String

      \n
    • \n
    • \n

      \n type\n

      \n

      Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.

      \n

      Type: String

      \n
    • \n
    • \n

      \n resourceNames\n

      \n

      The list of included resources that are assigned to the Application Component.

      \n

      Type: Array of strings

      \n
    • \n
    • \n

      \n additionalInfo\n

      \n

      Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

      \n \n

      Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

      \n

      Key: \"failover-regions\"\n

      \n

      Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"\n

      \n
      \n
    • \n
    \n
  • \n
  • \n

    \n \n excludedResources\n \n

    \n

    The list of logical resource identifiers to be excluded from the application.

    \n

    Type: Array

    \n \n

    Don't add the resources that you want to include.

    \n
    \n

    Each excludedResources array item includes the following fields:

    \n
      \n
    • \n

      \n \n logicalResourceIds\n \n

      \n

      Logical identifier of the resource.

      \n

      Type: Object

      \n \n

      You can configure only one of the following fields:

      \n
        \n
      • \n

        \n logicalStackName\n

        \n
      • \n
      • \n

        \n resourceGroupName\n

        \n
      • \n
      • \n

        \n terraformSourceName\n

        \n
      • \n
      • \n

        \n eksSourceName\n

        \n
      • \n
      \n
      \n

      Each logicalResourceIds object includes the following fields:

      \n
        \n
      • \n

        \n identifier\n

        \n

        Identifier of the resource.

        \n

        Type: String

        \n
      • \n
      • \n

        \n logicalStackName\n

        \n

        The name of the CloudFormation stack this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n resourceGroupName\n

        \n

        The name of the resource group this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n terraformSourceName\n

        \n

        The name of the Terraform S3 state file this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n eksSourceName\n

        \n

        Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

        \n \n

        This parameter accepts values in \"eks-cluster/namespace\" format.

        \n
        \n

        Type: String

        \n
      • \n
      \n
    • \n
    \n
  • \n
  • \n

    \n \n version\n \n

    \n

    Resilience Hub application version.

    \n
  • \n
  • \n

    \n additionalInfo\n

    \n

    Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

    \n \n

    Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

    \n

    Key: \"failover-regions\"\n

    \n

    Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"\n

    \n
    \n
  • \n
", + "smithy.api#documentation": "

A JSON string that provides information about your application structure. To learn more\n about the appTemplateBody template, see the sample template provided in the\n Examples section.

\n

The appTemplateBody JSON string has the following structure:

\n
    \n
  • \n

    \n \n resources\n \n

    \n

    The list of logical resources that must be included in the Resilience Hub\n application.

    \n

    Type: Array

    \n \n

    Don't add the resources that you want to exclude.

    \n
    \n

    Each resources array item includes the following fields:

    \n
      \n
    • \n

      \n \n logicalResourceId\n \n

      \n

      Logical identifier of the resource.

      \n

      Type: Object

      \n

      Each logicalResourceId object includes the following fields:

      \n
        \n
      • \n

        \n identifier\n

        \n

        Identifier of the resource.

        \n

        Type: String

        \n
      • \n
      • \n

        \n logicalStackName\n

        \n

        The name of the CloudFormation stack this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n resourceGroupName\n

        \n

        The name of the resource group this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n terraformSourceName\n

        \n

        The name of the Terraform S3 state file this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n eksSourceName\n

        \n

        Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

        \n \n

        This parameter accepts values in \"eks-cluster/namespace\" format.

        \n
        \n

        Type: String

        \n
      • \n
      \n
    • \n
    • \n

      \n \n type\n \n

      \n

      The type of resource.

      \n

      Type: string

      \n
    • \n
    • \n

      \n \n name\n \n

      \n

      The name of the resource.

      \n

      Type: String

      \n
    • \n
    • \n

      \n additionalInfo\n

      \n

      Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

      \n \n

      Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

      \n

      Key: \"failover-regions\"\n

      \n

      Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"\n

      \n
      \n
    • \n
    \n
  • \n
  • \n

    \n \n appComponents\n \n

    \n

    List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.

    \n

    Type: Array

    \n

    Each appComponents array item includes the following fields:

    \n
      \n
    • \n

      \n name\n

      \n

      Name of the Application Component.

      \n

      Type: String

      \n
    • \n
    • \n

      \n type\n

      \n

      Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.

      \n

      Type: String

      \n
    • \n
    • \n

      \n resourceNames\n

      \n

      The list of included resources that are assigned to the Application Component.

      \n

      Type: Array of strings

      \n
    • \n
    • \n

      \n additionalInfo\n

      \n

      Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

      \n \n

      Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

      \n

      Key: \"failover-regions\"\n

      \n

      Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"\n

      \n
      \n
    • \n
    \n
  • \n
  • \n

    \n \n excludedResources\n \n

    \n

    The list of logical resource identifiers to be excluded from the application.

    \n

    Type: Array

    \n \n

    Don't add the resources that you want to include.

    \n
    \n

    Each excludedResources array item includes the following fields:

    \n
      \n
    • \n

      \n \n logicalResourceIds\n \n

      \n

      Logical identifier of the resource.

      \n

      Type: Object

      \n \n

      You can configure only one of the following fields:

      \n
        \n
      • \n

        \n logicalStackName\n

        \n
      • \n
      • \n

        \n resourceGroupName\n

        \n
      • \n
      • \n

        \n terraformSourceName\n

        \n
      • \n
      • \n

        \n eksSourceName\n

        \n
      • \n
      \n
      \n

      Each logicalResourceIds object includes the following fields:

      \n
        \n
      • \n

        \n identifier\n

        \n

        Identifier of the resource.

        \n

        Type: String

        \n
      • \n
      • \n

        \n logicalStackName\n

        \n

        The name of the CloudFormation stack this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n resourceGroupName\n

        \n

        The name of the resource group this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n terraformSourceName\n

        \n

        The name of the Terraform S3 state file this resource belongs to.

        \n

        Type: String

        \n
      • \n
      • \n

        \n eksSourceName\n

        \n

        Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

        \n \n

        This parameter accepts values in \"eks-cluster/namespace\" format.

        \n
        \n

        Type: String

        \n
      • \n
      \n
    • \n
    \n
  • \n
  • \n

    \n \n version\n \n

    \n

    Resilience Hub application version.

    \n
  • \n
  • \n

    \n additionalInfo\n

    \n

    Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

    \n \n

    Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

    \n

    Key: \"failover-regions\"\n

    \n

    Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"\n

    \n
    \n
  • \n
", "smithy.api#required": {} } } @@ -4328,7 +4525,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the status of importing resources to an application version.

\n \n

If you get a 404 error with\n ResourceImportStatusNotFoundAppMetadataException, you must call\n importResourcesToDraftAppVersion after creating the application and before\n calling describeDraftAppVersionResourcesImportStatus to obtain the\n status.

\n
", + "smithy.api#documentation": "

Describes the status of importing resources to an application version.

\n \n

If you get a 404 error with\n ResourceImportStatusNotFoundAppMetadataException, you must call\n importResourcesToDraftAppVersion after creating the application and before\n calling describeDraftAppVersionResourcesImportStatus to obtain the\n status.

\n
", "smithy.api#http": { "method": "POST", "uri": "/describe-draft-app-version-resources-import-status", @@ -4445,6 +4642,89 @@ } } }, + "com.amazonaws.resiliencehub#DescribeResourceGroupingRecommendationTask": { + "type": "operation", + "input": { + "target": "com.amazonaws.resiliencehub#DescribeResourceGroupingRecommendationTaskRequest" + }, + "output": { + "target": "com.amazonaws.resiliencehub#DescribeResourceGroupingRecommendationTaskResponse" + }, + "errors": [ + { + "target": "com.amazonaws.resiliencehub#AccessDeniedException" + }, + { + "target": "com.amazonaws.resiliencehub#InternalServerException" + }, + { + "target": "com.amazonaws.resiliencehub#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.resiliencehub#ThrottlingException" + }, + { + "target": "com.amazonaws.resiliencehub#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Describes the resource grouping recommendation tasks run by Resilience Hub for your application.

", + "smithy.api#http": { + "method": "POST", + "uri": "/describe-resource-grouping-recommendation-task", + "code": 200 + } + } + }, + "com.amazonaws.resiliencehub#DescribeResourceGroupingRecommendationTaskRequest": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

", + "smithy.api#required": {} + } + }, + "groupingId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of the grouping recommendation task.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resiliencehub#DescribeResourceGroupingRecommendationTaskResponse": { + "type": "structure", + "members": { + "groupingId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of the grouping recommendation task.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.resiliencehub#ResourcesGroupingRecGenStatusType", + "traits": { + "smithy.api#documentation": "

Status of the action.

", + "smithy.api#required": {} + } + }, + "errorMessage": { + "target": "com.amazonaws.resiliencehub#String500", + "traits": { + "smithy.api#documentation": "

Indicates the error that occurred while generating a grouping recommendation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resiliencehub#DifferenceType": { "type": "string", "traits": { @@ -4714,6 +4994,12 @@ "smithy.api#pattern": "^[A-Za-z0-9][A-Za-z0-9_\\-]{1,59}$" } }, + "com.amazonaws.resiliencehub#EntityName255": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[A-Za-z0-9][A-Za-z0-9_\\-]{0,254}$" + } + }, "com.amazonaws.resiliencehub#EntityNameList": { "type": "list", "member": { @@ -4832,6 +5118,34 @@ ] } }, + "com.amazonaws.resiliencehub#FailedGroupingRecommendationEntries": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#FailedGroupingRecommendationEntry" + } + }, + "com.amazonaws.resiliencehub#FailedGroupingRecommendationEntry": { + "type": "structure", + "members": { + "groupingRecommendationId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of the grouping recommendation.

", + "smithy.api#required": {} + } + }, + "errorMessage": { + "target": "com.amazonaws.resiliencehub#ErrorMessage", + "traits": { + "smithy.api#documentation": "

Indicates the error that occurred while implementing a grouping recommendation.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates the accepted grouping recommendation whose implementation failed.

" + } + }, "com.amazonaws.resiliencehub#FailurePolicy": { "type": "structure", "members": { @@ -4856,12 +5170,224 @@ "smithy.api#documentation": "

Defines a failure policy.

" } }, - "com.amazonaws.resiliencehub#HaArchitecture": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "MultiSite", + "com.amazonaws.resiliencehub#GroupingAppComponent": { + "type": "structure", + "members": { + "appComponentId": { + "target": "com.amazonaws.resiliencehub#EntityName255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of an AppComponent.

", + "smithy.api#required": {} + } + }, + "appComponentType": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the type of an AppComponent.

", + "smithy.api#required": {} + } + }, + "appComponentName": { + "target": "com.amazonaws.resiliencehub#EntityName255", + "traits": { + "smithy.api#documentation": "

Indicates the name of an AppComponent.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Creates a new recommended Application Component (AppComponent).

" + } + }, + "com.amazonaws.resiliencehub#GroupingRecommendation": { + "type": "structure", + "members": { + "groupingRecommendationId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates all the reasons available for rejecting a grouping recommendation.

", + "smithy.api#required": {} + } + }, + "groupingAppComponent": { + "target": "com.amazonaws.resiliencehub#GroupingAppComponent", + "traits": { + "smithy.api#documentation": "

Indicates the name of the recommended Application Component (AppComponent).

", + "smithy.api#required": {} + } + }, + "resources": { + "target": "com.amazonaws.resiliencehub#GroupingResourceList", + "traits": { + "smithy.api#documentation": "

Indicates the resources that are grouped in a recommended AppComponent.

", + "smithy.api#required": {} + } + }, + "score": { + "target": "com.amazonaws.resiliencehub#Double", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

Indicates the confidence level of the grouping recommendation.

", + "smithy.api#required": {} + } + }, + "recommendationReasons": { + "target": "com.amazonaws.resiliencehub#String255List", + "traits": { + "smithy.api#documentation": "

Indicates all the reasons available for rejecting a grouping recommendation.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendationStatusType", + "traits": { + "smithy.api#documentation": "

Indicates the status of grouping resources into AppComponents.

", + "smithy.api#required": {} + } + }, + "confidenceLevel": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendationConfidenceLevel", + "traits": { + "smithy.api#documentation": "

Indicates the confidence level of Resilience Hub on the grouping recommendation.

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "com.amazonaws.resiliencehub#TimeStamp", + "traits": { + "smithy.api#documentation": "

Indicates the creation time of the grouping recommendation.

", + "smithy.api#required": {} + } + }, + "rejectionReason": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendationRejectionReason", + "traits": { + "smithy.api#documentation": "

Indicates the reason you had selected while rejecting a grouping recommendation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Creates a new grouping recommendation.

" + } + }, + "com.amazonaws.resiliencehub#GroupingRecommendationConfidenceLevel": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "HIGH", + "value": "High" + }, + { + "name": "MEDIUM", + "value": "Medium" + } + ] + } + }, + "com.amazonaws.resiliencehub#GroupingRecommendationList": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendation" + } + }, + "com.amazonaws.resiliencehub#GroupingRecommendationRejectionReason": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "DISTINCT_BUSINESS_PURPOSE", + "value": "DistinctBusinessPurpose" + }, + { + "name": "SEPARATE_DATA_CONCERN", + "value": "SeparateDataConcern" + }, + { + "name": "DISTINCT_USER_GROUP_HANDLING", + "value": "DistinctUserGroupHandling" + }, + { + "name": "OTHER", + "value": "Other" + } + ] + } + }, + "com.amazonaws.resiliencehub#GroupingRecommendationStatusType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "ACCEPTED", + "value": "Accepted" + }, + { + "name": "REJECTED", + "value": "Rejected" + }, + { + "name": "PENDING_DECISION", + "value": "PendingDecision" + } + ] + } + }, + "com.amazonaws.resiliencehub#GroupingResource": { + "type": "structure", + "members": { + "resourceName": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the resource name.

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the resource type.

", + "smithy.api#required": {} + } + }, + "physicalResourceId": { + "target": "com.amazonaws.resiliencehub#PhysicalResourceId", + "traits": { + "smithy.api#documentation": "

Indicates the physical identifier of the resource.

", + "smithy.api#required": {} + } + }, + "logicalResourceId": { + "target": "com.amazonaws.resiliencehub#LogicalResourceId", + "traits": { + "smithy.api#documentation": "

Indicates the logical identifier of the resource.

", + "smithy.api#required": {} + } + }, + "sourceAppComponentIds": { + "target": "com.amazonaws.resiliencehub#String255List", + "traits": { + "smithy.api#documentation": "

Indicates the identifiers of the source AppComponents in which the resources were previously grouped.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates the resource that will be grouped in the recommended Application Component (AppComponent).

" + } + }, + "com.amazonaws.resiliencehub#GroupingResourceList": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#GroupingResource" + } + }, + "com.amazonaws.resiliencehub#HaArchitecture": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "MultiSite", "name": "MULTI_SITE" }, { @@ -4939,7 +5465,7 @@ } ], "traits": { - "smithy.api#documentation": "

Imports resources to Resilience Hub application draft version from different input sources. For more information about the input sources supported by Resilience Hub, see Discover\n the structure and describe your Resilience Hub application.

", + "smithy.api#documentation": "

Imports resources to Resilience Hub application draft version from different input\n sources. For more information about the input sources supported by Resilience Hub, see\n Discover the structure and describe your Resilience Hub application.

", "smithy.api#http": { "method": "POST", "uri": "/import-resources-to-draft-app-version", @@ -5016,7 +5542,7 @@ "terraformSources": { "target": "com.amazonaws.resiliencehub#TerraformSourceList", "traits": { - "smithy.api#documentation": "

\n A list of terraform file s3 URLs you have imported.\n

" + "smithy.api#documentation": "

A list of terraform file s3 URLs you have imported.

" } }, "eksSources": { @@ -5118,7 +5644,7 @@ "alarmRecommendations": { "target": "com.amazonaws.resiliencehub#AlarmRecommendationList", "traits": { - "smithy.api#documentation": "

The alarm recommendations for an Resilience Hub application, returned as an object. This\n object includes Application Component names, descriptions, information about whether a\n recommendation has already been implemented or not, prerequisites, and more.

", + "smithy.api#documentation": "

The alarm recommendations for an Resilience Hub application, returned as an\n object. This object includes Application Component names, descriptions, information about whether a\n recommendation has already been implemented or not, prerequisites, and more.

", "smithy.api#required": {} } }, @@ -5179,13 +5705,13 @@ "nextToken": { "target": "com.amazonaws.resiliencehub#NextToken", "traits": { - "smithy.api#documentation": "

Indicates the unique token number of the next application to be checked for compliance and regulatory requirements from the list of applications.

" + "smithy.api#documentation": "

Null, or the token from a previous call to get the next set of results.

" } }, "maxResults": { "target": "com.amazonaws.resiliencehub#MaxResults", "traits": { - "smithy.api#documentation": "

Indicates the maximum number of applications requested.

" + "smithy.api#documentation": "

Indicates the maximum number of compliance drifts requested.

" } } }, @@ -5199,14 +5725,14 @@ "complianceDrifts": { "target": "com.amazonaws.resiliencehub#ComplianceDriftList", "traits": { - "smithy.api#documentation": "

Indicates compliance drifts (recovery time objective (RTO) and recovery point objective (RPO)) detected for an assessed entity.

", + "smithy.api#documentation": "

Indicates compliance drifts (recovery time objective (RTO) and recovery point objective\n (RPO)) detected for an assessed entity.

", "smithy.api#required": {} } }, "nextToken": { "target": "com.amazonaws.resiliencehub#NextToken", "traits": { - "smithy.api#documentation": "

Token number of the next application to be checked for compliance and regulatory requirements from the list of applications.

" + "smithy.api#documentation": "

Null, or the token from a previous call to get the next set of results.

" } } }, @@ -5237,7 +5763,7 @@ } ], "traits": { - "smithy.api#documentation": "

Indicates the list of resource drifts that were detected while running an assessment.

", + "smithy.api#documentation": "

Indicates the list of resource drifts that were detected while running an\n assessment.

", "smithy.api#http": { "method": "POST", "uri": "/list-app-assessment-resource-drifts", @@ -5270,7 +5796,7 @@ "maxResults": { "target": "com.amazonaws.resiliencehub#MaxResults", "traits": { - "smithy.api#documentation": "

Indicates the maximum number of drift results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" + "smithy.api#documentation": "

Indicates the maximum number of drift results to include in the response. If more results\n exist than the specified MaxResults value, a token is included in the response so\n that the remaining results can be retrieved.

" } } }, @@ -5325,7 +5851,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the assessments for an Resilience Hub application. You can use request parameters to\n refine the results for the response object.

", + "smithy.api#documentation": "

Lists the assessments for an Resilience Hub application. You can use request\n parameters to refine the results for the response object.

", "smithy.api#http": { "method": "GET", "uri": "/list-app-assessments", @@ -5487,7 +6013,7 @@ "componentCompliances": { "target": "com.amazonaws.resiliencehub#ComponentCompliancesList", "traits": { - "smithy.api#documentation": "

The compliances for an Resilience Hub Application Component, returned as an object. This\n object contains the names of the Application Components, compliances, costs, resiliency scores, outage scores, and\n more.

", + "smithy.api#documentation": "

The compliances for an Resilience Hub Application Component, returned as an object. This\n object contains the names of the Application Components, compliances, costs, resiliency scores,\n outage scores, and more.

", "smithy.api#required": {} } }, @@ -5568,7 +6094,7 @@ "componentRecommendations": { "target": "com.amazonaws.resiliencehub#ComponentRecommendationList", "traits": { - "smithy.api#documentation": "

The recommendations for an Resilience Hub Application Component, returned as an object. This\n object contains the names of the Application Components, configuration recommendations, and recommendation\n statuses.

", + "smithy.api#documentation": "

The recommendations for an Resilience Hub Application Component, returned as an object.\n This object contains the names of the Application Components, configuration recommendations, and\n recommendation statuses.

", "smithy.api#required": {} } }, @@ -5606,7 +6132,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists all the input sources of the Resilience Hub application. For more information about the\n input sources supported by Resilience Hub, see Discover\n the structure and describe your Resilience Hub application.

", + "smithy.api#documentation": "

Lists all the input sources of the Resilience Hub application. For more\n information about the input sources supported by Resilience Hub, see Discover\n the structure and describe your Resilience Hub application.

", "smithy.api#http": { "method": "POST", "uri": "/list-app-input-sources", @@ -5645,7 +6171,7 @@ "maxResults": { "target": "com.amazonaws.resiliencehub#MaxResults", "traits": { - "smithy.api#documentation": "

Maximum number of input sources to be displayed per Resilience Hub application.

" + "smithy.api#documentation": "

Maximum number of input sources to be displayed per Resilience Hub\n application.

" } } } @@ -6125,21 +6651,21 @@ "fromLastAssessmentTime": { "target": "com.amazonaws.resiliencehub#TimeStamp", "traits": { - "smithy.api#documentation": "

Indicates the lower limit of the range that is used to filter applications based on their last assessment times.

", + "smithy.api#documentation": "

Indicates the lower limit of the range that is used to filter applications based on their\n last assessment times.

", "smithy.api#httpQuery": "fromLastAssessmentTime" } }, "toLastAssessmentTime": { "target": "com.amazonaws.resiliencehub#TimeStamp", "traits": { - "smithy.api#documentation": "

Indicates the upper limit of the range that is used to filter the applications based on their last assessment times.

", + "smithy.api#documentation": "

Indicates the upper limit of the range that is used to filter the applications based on\n their last assessment times.

", "smithy.api#httpQuery": "toLastAssessmentTime" } }, "reverseOrder": { "target": "com.amazonaws.resiliencehub#BooleanOptional", "traits": { - "smithy.api#documentation": "

The application list is sorted based on the values of lastAppComplianceEvaluationTime field. By default, application list is sorted in ascending order. To sort the appliation list in descending order, set this field to True.

", + "smithy.api#documentation": "

The application list is sorted based on the values of\n lastAppComplianceEvaluationTime field. By default, application list is sorted\n in ascending order. To sort the application list in descending order, set this field to\n True.

", "smithy.api#httpQuery": "reverseOrder" } } @@ -6355,6 +6881,97 @@ } } }, + "com.amazonaws.resiliencehub#ListResourceGroupingRecommendations": { + "type": "operation", + "input": { + "target": "com.amazonaws.resiliencehub#ListResourceGroupingRecommendationsRequest" + }, + "output": { + "target": "com.amazonaws.resiliencehub#ListResourceGroupingRecommendationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.resiliencehub#AccessDeniedException" + }, + { + "target": "com.amazonaws.resiliencehub#InternalServerException" + }, + { + "target": "com.amazonaws.resiliencehub#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.resiliencehub#ThrottlingException" + }, + { + "target": "com.amazonaws.resiliencehub#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the resource grouping recommendations suggested by Resilience Hub for your application.

", + "smithy.api#http": { + "method": "GET", + "uri": "/list-resource-grouping-recommendations", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "groupingRecommendations" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.resiliencehub#ListResourceGroupingRecommendationsRequest": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

", + "smithy.api#httpQuery": "appArn" + } + }, + "nextToken": { + "target": "com.amazonaws.resiliencehub#NextToken", + "traits": { + "smithy.api#documentation": "

Null, or the token from a previous call to get the next set of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.resiliencehub#MaxResults", + "traits": { + "smithy.api#documentation": "

Maximum number of grouping recommendations to be displayed per Resilience Hub application.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resiliencehub#ListResourceGroupingRecommendationsResponse": { + "type": "structure", + "members": { + "groupingRecommendations": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendationList", + "traits": { + "smithy.api#documentation": "

List of resource grouping recommendations generated by Resilience Hub.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.resiliencehub#NextToken", + "traits": { + "smithy.api#documentation": "

Null, or the token from a previous call to get the next set of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resiliencehub#ListSopRecommendations": { "type": "operation", "input": { @@ -6384,7 +7001,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the standard operating procedure (SOP) recommendations for the Resilience Hub\n applications.

", + "smithy.api#documentation": "

Lists the standard operating procedure (SOP) recommendations for the Resilience Hub applications.

", "smithy.api#http": { "method": "POST", "uri": "/list-sop-recommendations", @@ -6465,7 +7082,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the suggested resiliency policies for the Resilience Hub applications.

", + "smithy.api#documentation": "

Lists the suggested resiliency policies for the Resilience Hub\n applications.

", "smithy.api#http": { "method": "GET", "uri": "/list-suggested-resiliency-policies", @@ -6688,7 +7305,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the resources that are not currently supported in Resilience Hub. An unsupported\n resource is a resource that exists in the object that was used to create an app, but is not\n supported by Resilience Hub.

", + "smithy.api#documentation": "

Lists the resources that are not currently supported in Resilience Hub. An\n unsupported resource is a resource that exists in the object that was used to create an app,\n but is not supported by Resilience Hub.

", "smithy.api#http": { "method": "POST", "uri": "/list-unsupported-app-version-resources", @@ -7102,7 +7719,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds or updates the app template for an Resilience Hub application draft version.

", + "smithy.api#documentation": "

Adds or updates the app template for an Resilience Hub application draft\n version.

", "smithy.api#http": { "method": "POST", "uri": "/put-draft-app-version-template", @@ -7170,6 +7787,10 @@ { "value": "MetCanImprove", "name": "MET_CAN_IMPROVE" + }, + { + "value": "MissingPolicy", + "name": "MISSING_POLICY" } ] } @@ -7439,6 +8060,117 @@ } } }, + "com.amazonaws.resiliencehub#RejectGroupingRecommendationEntries": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#RejectGroupingRecommendationEntry" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 30 + } + } + }, + "com.amazonaws.resiliencehub#RejectGroupingRecommendationEntry": { + "type": "structure", + "members": { + "groupingRecommendationId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of the grouping recommendation.

", + "smithy.api#required": {} + } + }, + "rejectionReason": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendationRejectionReason", + "traits": { + "smithy.api#documentation": "

Indicates the reason you selected while rejecting a grouping recommendation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates the rejected grouping recommendation.

" + } + }, + "com.amazonaws.resiliencehub#RejectResourceGroupingRecommendations": { + "type": "operation", + "input": { + "target": "com.amazonaws.resiliencehub#RejectResourceGroupingRecommendationsRequest" + }, + "output": { + "target": "com.amazonaws.resiliencehub#RejectResourceGroupingRecommendationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.resiliencehub#AccessDeniedException" + }, + { + "target": "com.amazonaws.resiliencehub#InternalServerException" + }, + { + "target": "com.amazonaws.resiliencehub#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.resiliencehub#ThrottlingException" + }, + { + "target": "com.amazonaws.resiliencehub#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Rejects resource grouping recommendations.

", + "smithy.api#http": { + "method": "POST", + "uri": "/reject-resource-grouping-recommendations", + "code": 200 + } + } + }, + "com.amazonaws.resiliencehub#RejectResourceGroupingRecommendationsRequest": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

", + "smithy.api#required": {} + } + }, + "entries": { + "target": "com.amazonaws.resiliencehub#RejectGroupingRecommendationEntries", + "traits": { + "smithy.api#documentation": "

Indicates the list of resource grouping recommendations you have selected to exclude from your application.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resiliencehub#RejectResourceGroupingRecommendationsResponse": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

", + "smithy.api#required": {} + } + }, + "failedEntries": { + "target": "com.amazonaws.resiliencehub#FailedGroupingRecommendationEntries", + "traits": { + "smithy.api#documentation": "

Indicates the list of resource grouping recommendations that failed to get excluded in your application.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resiliencehub#RemoveDraftAppVersionResourceMappings": { "type": "operation", "input": { @@ -7501,7 +8233,7 @@ "appRegistryAppNames": { "target": "com.amazonaws.resiliencehub#EntityNameList", "traits": { - "smithy.api#documentation": "

The names of the registered applications you want to remove from the resource mappings.

" + "smithy.api#documentation": "

The names of the registered applications you want to remove from the resource\n mappings.

" } }, "resourceGroupNames": { @@ -7519,7 +8251,7 @@ "eksSourceNames": { "target": "com.amazonaws.resiliencehub#String255List", "traits": { - "smithy.api#documentation": "

The names of the Amazon Elastic Kubernetes Service clusters and namespaces you want to remove from the resource mappings.

\n \n

This parameter accepts values in \"eks-cluster/namespace\" format.

\n
" + "smithy.api#documentation": "

The names of the Amazon Elastic Kubernetes Service clusters and namespaces you want to remove from\n the resource mappings.

\n \n

This parameter accepts values in \"eks-cluster/namespace\" format.

\n
" } } } @@ -7900,7 +8632,7 @@ "hasMoreErrors": { "target": "com.amazonaws.resiliencehub#BooleanOptional", "traits": { - "smithy.api#documentation": "

This indicates if there are more errors not listed in the resourceErrors list.

" + "smithy.api#documentation": "

This indicates if there are more errors not listed in the\n resourceErrors\n list.

" } } }, @@ -8136,6 +8868,29 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.resiliencehub#ResourcesGroupingRecGenStatusType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "PENDING", + "value": "Pending" + }, + { + "name": "IN_PROGRESS", + "value": "InProgress" + }, + { + "name": "FAILED", + "value": "Failed" + }, + { + "name": "SUCCESS", + "value": "Success" + } + ] + } + }, "com.amazonaws.resiliencehub#RetryAfterSeconds": { "type": "integer" }, @@ -8417,6 +9172,93 @@ } } }, + "com.amazonaws.resiliencehub#StartResourceGroupingRecommendationTask": { + "type": "operation", + "input": { + "target": "com.amazonaws.resiliencehub#StartResourceGroupingRecommendationTaskRequest" + }, + "output": { + "target": "com.amazonaws.resiliencehub#StartResourceGroupingRecommendationTaskResponse" + }, + "errors": [ + { + "target": "com.amazonaws.resiliencehub#AccessDeniedException" + }, + { + "target": "com.amazonaws.resiliencehub#ConflictException" + }, + { + "target": "com.amazonaws.resiliencehub#InternalServerException" + }, + { + "target": "com.amazonaws.resiliencehub#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.resiliencehub#ThrottlingException" + }, + { + "target": "com.amazonaws.resiliencehub#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Starts grouping recommendation task.

", + "smithy.api#http": { + "method": "POST", + "uri": "/start-resource-grouping-recommendation-task", + "code": 200 + } + } + }, + "com.amazonaws.resiliencehub#StartResourceGroupingRecommendationTaskRequest": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resiliencehub#StartResourceGroupingRecommendationTaskResponse": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

", + "smithy.api#required": {} + } + }, + "groupingId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of the grouping recommendation task.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.resiliencehub#ResourcesGroupingRecGenStatusType", + "traits": { + "smithy.api#documentation": "

Status of the action.

", + "smithy.api#required": {} + } + }, + "errorMessage": { + "target": "com.amazonaws.resiliencehub#String500", + "traits": { + "smithy.api#documentation": "

Indicates the error that occurred while executing a grouping recommendation task.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resiliencehub#String1024": { "type": "string", "traits": { @@ -8933,7 +9775,7 @@ "assessmentSchedule": { "target": "com.amazonaws.resiliencehub#AppAssessmentScheduleType", "traits": { - "smithy.api#documentation": "

\n Assessment execution schedule with 'Daily' or 'Disabled' values.\n

" + "smithy.api#documentation": "

Assessment execution schedule with 'Daily' or 'Disabled' values.

" } }, "permissionModel": { @@ -8945,7 +9787,7 @@ "eventSubscriptions": { "target": "com.amazonaws.resiliencehub#EventSubscriptionList", "traits": { - "smithy.api#documentation": "

The list of events you would like to subscribe and get notification for.\n Currently, Resilience Hub supports notifications only for Drift\n detected and Scheduled assessment failure\n events.

" + "smithy.api#documentation": "

The list of events you would like to subscribe and get notification for. Currently,\n Resilience Hub supports notifications only for Drift\n detected and Scheduled assessment failure\n events.

" } } } @@ -8991,7 +9833,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the Resilience Hub application version.

\n \n

This API updates the Resilience Hub application draft version. To use this information\n for running resiliency assessments, you must publish the Resilience Hub application using the\n PublishAppVersion API.

\n
", + "smithy.api#documentation": "

Updates the Resilience Hub application version.

\n \n

This API updates the Resilience Hub application draft version. To use this\n information for running resiliency assessments, you must publish the Resilience Hub\n application using the PublishAppVersion API.

\n
", "smithy.api#http": { "method": "POST", "uri": "/update-app-version", @@ -9028,7 +9870,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates an existing Application Component in the Resilience Hub application.

\n \n

This API updates the Resilience Hub application draft version. To use this Application Component for running assessments, you must publish the Resilience Hub application using the PublishAppVersion API.

\n
", + "smithy.api#documentation": "

Updates an existing Application Component in the Resilience Hub application.

\n \n

This API updates the Resilience Hub application draft version. To use this\n Application Component for running assessments, you must publish the Resilience Hub\n application using the PublishAppVersion API.

\n
", "smithy.api#http": { "method": "POST", "uri": "/update-app-version-app-component", @@ -9148,7 +9990,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the resource details in the Resilience Hub application.

\n \n
    \n
  • \n

    This action has no effect outside Resilience Hub.

    \n
  • \n
  • \n

    This API updates the Resilience Hub application draft version. To use this resource for running resiliency assessments, you must publish the Resilience Hub application using the PublishAppVersion API.

    \n
  • \n
  • \n

    To update application version with new physicalResourceID, you must call\n ResolveAppVersionResources API.

    \n
  • \n
\n
", + "smithy.api#documentation": "

Updates the resource details in the Resilience Hub application.

\n \n
    \n
  • \n

    This action has no effect outside Resilience Hub.

    \n
  • \n
  • \n

    This API updates the Resilience Hub application draft version. To use this\n resource for running resiliency assessments, you must publish the Resilience Hub\n application using the PublishAppVersion API.

    \n
  • \n
  • \n

    To update application version with new physicalResourceID, you must\n call ResolveAppVersionResources API.

    \n
  • \n
\n
", "smithy.api#http": { "method": "POST", "uri": "/update-app-version-resource", @@ -9217,7 +10059,7 @@ "excluded": { "target": "com.amazonaws.resiliencehub#BooleanOptional", "traits": { - "smithy.api#documentation": "

Indicates if a resource is excluded from an Resilience Hub application.

\n \n

You can exclude only imported resources from an Resilience Hub application.

\n
" + "smithy.api#documentation": "

Indicates if a resource is excluded from an Resilience Hub application.

\n \n

You can exclude only imported resources from an Resilience Hub\n application.

\n
" } } } @@ -9381,7 +10223,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a resiliency policy.

\n \n

Resilience Hub allows you to provide a value of zero for rtoInSecs and\n rpoInSecs of your resiliency policy. But, while assessing your application,\n the lowest possible assessment result is near zero. Hence, if you provide value\n zero for rtoInSecs and rpoInSecs, the estimated workload RTO and\n estimated workload RPO result will be near zero and the Compliance\n status for your application will be set to Policy\n breached.

\n
", + "smithy.api#documentation": "

Updates a resiliency policy.

\n \n

Resilience Hub allows you to provide a value of zero for rtoInSecs\n and rpoInSecs of your resiliency policy. But, while assessing your application,\n the lowest possible assessment result is near zero. Hence, if you provide value zero for\n rtoInSecs and rpoInSecs, the estimated workload RTO and\n estimated workload RPO result will be near zero and the Compliance\n status for your application will be set to Policy\n breached.

\n
", "smithy.api#http": { "method": "POST", "uri": "/update-resiliency-policy", diff --git a/models/rolesanywhere.json b/models/rolesanywhere.json index 0d7da1ff85..8d19558919 100644 --- a/models/rolesanywhere.json +++ b/models/rolesanywhere.json @@ -149,6 +149,12 @@ "traits": { "smithy.api#documentation": "

The tags to attach to the profile.

" } + }, + "acceptRoleSessionName": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Used to determine if a custom role session name will be accepted in a temporary credential request.

" + } } } }, @@ -405,6 +411,7 @@ }, "output": { "profile": { + "acceptRoleSessionName": false, "attributeMappings": [ { "mappingRules": [ @@ -1533,6 +1540,12 @@ "smithy.api#documentation": "

Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the \nCreateSession API documentation\npage for more details. In requests, if this value is not provided, the default value will be 3600.

" } }, + "acceptRoleSessionName": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Used to determine if a custom role session name will be accepted in a temporary credential request.

" + } + }, "attributeMappings": { "target": "com.amazonaws.rolesanywhere#AttributeMappings", "traits": { @@ -1596,6 +1609,7 @@ }, "output": { "profile": { + "acceptRoleSessionName": false, "attributeMappings": [ { "mappingRules": [ @@ -3560,6 +3574,12 @@ "max": 43200 } } + }, + "acceptRoleSessionName": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Used to determine if a custom role session name will be accepted in a temporary credential request.

" + } } } }, diff --git a/models/route-53.json b/models/route-53.json index 85c14d670f..ef1d914499 100644 --- a/models/route-53.json +++ b/models/route-53.json @@ -2175,91 +2175,6 @@ "traits": { "smithy.api#documentation": "

Creates, changes, or deletes a resource record set, which contains authoritative DNS\n\t\t\tinformation for a specified domain name or subdomain name. For example, you can use\n\t\t\t\tChangeResourceRecordSets to create a resource record set that routes\n\t\t\ttraffic for test.example.com to a web server that has an IP address of\n\t\t\t192.0.2.44.

\n

\n Deleting Resource Record Sets\n

\n

To delete a resource record set, you must specify all the same values that you\n\t\t\tspecified when you created it.

\n

\n Change Batches and Transactional Changes\n

\n

The request body must include a document with a\n\t\t\t\tChangeResourceRecordSetsRequest element. The request body contains a\n\t\t\tlist of change items, known as a change batch. Change batches are considered\n\t\t\ttransactional changes. Route 53 validates the changes in the request and then either\n\t\t\tmakes all or none of the changes in the change batch request. This ensures that DNS\n\t\t\trouting isn't adversely affected by partial changes to the resource record sets in a\n\t\t\thosted zone.

\n

For example, suppose a change batch request contains two changes: it deletes the\n\t\t\t\tCNAME resource record set for www.example.com and creates an alias\n\t\t\tresource record set for www.example.com. If validation for both records succeeds, Route\n\t\t\t53 deletes the first resource record set and creates the second resource record set in a\n\t\t\tsingle operation. If validation for either the DELETE or the\n\t\t\t\tCREATE action fails, then the request is canceled, and the original\n\t\t\t\tCNAME record continues to exist.

\n \n

If you try to delete the same resource record set more than once in a single\n\t\t\t\tchange batch, Route 53 returns an InvalidChangeBatch error.

\n
\n

\n Traffic Flow\n

\n

To create resource record sets for complex routing configurations, use either the\n\t\t\ttraffic flow visual editor in the Route 53 console or the API actions for traffic\n\t\t\tpolicies and traffic policy instances. Save the configuration as a traffic policy, then\n\t\t\tassociate the traffic policy with one or more domain names (such as example.com) or\n\t\t\tsubdomain names (such as www.example.com), in the same hosted zone or in multiple hosted\n\t\t\tzones. You can roll back the updates if the new configuration isn't performing as\n\t\t\texpected. For more information, see Using Traffic Flow to Route\n\t\t\t\tDNS Traffic in the Amazon Route 53 Developer\n\t\t\tGuide.

\n

\n Create, Delete, and Upsert\n

\n

Use ChangeResourceRecordsSetsRequest to perform the following\n\t\t\tactions:

\n
    \n
  • \n

    \n CREATE: Creates a resource record set that has the specified\n\t\t\t\t\tvalues.

    \n
  • \n
  • \n

    \n DELETE: Deletes an existing resource record set that has the\n\t\t\t\t\tspecified values.

    \n
  • \n
  • \n

    \n UPSERT: If a resource set doesn't exist, Route 53 creates it. If a resource\n\t\t\t\t\tset exists Route 53 updates it with the values in the request.

    \n
  • \n
\n

\n Syntaxes for Creating, Updating, and Deleting Resource Record\n\t\t\t\tSets\n

\n

The syntax for a request depends on the type of resource record set that you want to\n\t\t\tcreate, delete, or update, such as weighted, alias, or failover. The XML elements in\n\t\t\tyour request must appear in the order listed in the syntax.

\n

For an example for each type of resource record set, see \"Examples.\"

\n

Don't refer to the syntax in the \"Parameter Syntax\" section, which includes\n\t\t\tall of the elements for every kind of resource record set that you can create, delete,\n\t\t\tor update by using ChangeResourceRecordSets.

\n

\n Change Propagation to Route 53 DNS Servers\n

\n

When you submit a ChangeResourceRecordSets request, Route 53 propagates your\n\t\t\tchanges to all of the Route 53 authoritative DNS servers managing the hosted zone. While\n\t\t\tyour changes are propagating, GetChange returns a status of\n\t\t\t\tPENDING. When propagation is complete, GetChange returns a\n\t\t\tstatus of INSYNC. Changes generally propagate to all Route 53 name servers\n\t\t\tmanaging the hosted zone within 60 seconds. For more information, see GetChange.

\n

\n Limits on ChangeResourceRecordSets Requests\n

\n

For information about the limits on a ChangeResourceRecordSets request,\n\t\t\tsee Limits in the Amazon Route 53 Developer Guide.

", "smithy.api#examples": [ - { - "title": "To create a basic resource record set", - "documentation": "The following example creates a resource record set that routes Internet traffic to a resource with an IP address of 192.0.2.44.", - "input": { - "HostedZoneId": "Z3M3LMPEXAMPLE", - "ChangeBatch": { - "Comment": "Web server for example.com", - "Changes": [ - { - "Action": "CREATE", - "ResourceRecordSet": { - "Name": "example.com", - "Type": "A", - "TTL": 60, - "ResourceRecords": [ - { - "Value": "192.0.2.44" - } - ] - } - } - ] - } - }, - "output": { - "ChangeInfo": { - "Comment": "Web server for example.com", - "Id": "/change/C2682N5HXP0BZ4", - "Status": "PENDING", - "SubmittedAt": "2017-02-10T01:36:41.958Z" - } - } - }, - { - "title": "To create weighted resource record sets", - "documentation": "The following example creates two weighted resource record sets. The resource with a Weight of 100 will get 1/3rd of traffic (100/100+200), and the other resource will get the rest of the traffic for example.com.", - "input": { - "HostedZoneId": "Z3M3LMPEXAMPLE", - "ChangeBatch": { - "Comment": "Web servers for example.com", - "Changes": [ - { - "Action": "CREATE", - "ResourceRecordSet": { - "Name": "example.com", - "Type": "A", - "SetIdentifier": "Seattle data center", - "Weight": 100, - "TTL": 60, - "ResourceRecords": [ - { - "Value": "192.0.2.44" - } - ], - "HealthCheckId": "abcdef11-2222-3333-4444-555555fedcba" - } - }, - { - "Action": "CREATE", - "ResourceRecordSet": { - "Name": "example.com", - "Type": "A", - "SetIdentifier": "Portland data center", - "Weight": 200, - "TTL": 60, - "ResourceRecords": [ - { - "Value": "192.0.2.45" - } - ], - "HealthCheckId": "abcdef66-7777-8888-9999-000000fedcba" - } - } - ] - } - }, - "output": { - "ChangeInfo": { - "Comment": "Web servers for example.com", - "Id": "/change/C2682N5HXP0BZ4", - "Status": "PENDING", - "SubmittedAt": "2017-02-10T01:36:41.958Z" - } - } - }, { "title": "To create an alias resource record set", 
"documentation": "The following example creates an alias resource record set that routes traffic to a CloudFront distribution.", @@ -2293,12 +2208,12 @@ } }, { - "title": "To create weighted alias resource record sets", - "documentation": "The following example creates two weighted alias resource record sets that route traffic to ELB load balancers. The resource with a Weight of 100 will get 1/3rd of traffic (100/100+200), and the other resource will get the rest of the traffic for example.com.", + "title": "To create failover alias resource record sets", + "documentation": "The following example creates primary and secondary failover alias resource record sets that route traffic to ELB load balancers. Traffic is generally routed to the primary resource, in the Ohio region. If that resource is unavailable, traffic is routed to the secondary resource, in the Oregon region.", "input": { "HostedZoneId": "Z3M3LMPEXAMPLE", "ChangeBatch": { - "Comment": "ELB load balancers for example.com", + "Comment": "Failover alias configuration for example.com", "Changes": [ { "Action": "CREATE", @@ -2306,7 +2221,7 @@ "Name": "example.com", "Type": "A", "SetIdentifier": "Ohio region", - "Weight": 100, + "Failover": "PRIMARY", "AliasTarget": { "HostedZoneId": "Z3AADJGX6KTTL2", "DNSName": "example-com-123456789.us-east-2.elb.amazonaws.com ", @@ -2320,7 +2235,7 @@ "Name": "example.com", "Type": "A", "SetIdentifier": "Oregon region", - "Weight": 200, + "Failover": "SECONDARY", "AliasTarget": { "HostedZoneId": "Z1H1FL5HABSF5", "DNSName": "example-com-987654321.us-west-2.elb.amazonaws.com ", @@ -2333,7 +2248,7 @@ }, "output": { "ChangeInfo": { - "Comment": "ELB load balancers for example.com", + "Comment": "Failover alias configuration for example.com", "Id": "/change/C2682N5HXP0BZ4", "Status": "PENDING", "SubmittedAt": "2017-02-10T01:36:41.958Z" @@ -2341,12 +2256,12 @@ } }, { - "title": "To create latency resource record sets", - "documentation": "The following example creates two 
latency resource record sets that route traffic to EC2 instances. Traffic for example.com is routed either to the Ohio region or the Oregon region, depending on the latency between the user and those regions.", + "title": "To create failover resource record sets", + "documentation": "The following example creates primary and secondary failover resource record sets that route traffic to EC2 instances. Traffic is generally routed to the primary resource, in the Ohio region. If that resource is unavailable, traffic is routed to the secondary resource, in the Oregon region.", "input": { "HostedZoneId": "Z3M3LMPEXAMPLE", "ChangeBatch": { - "Comment": "EC2 instances for example.com", + "Comment": "Failover configuration for example.com", "Changes": [ { "Action": "CREATE", @@ -2354,7 +2269,7 @@ "Name": "example.com", "Type": "A", "SetIdentifier": "Ohio region", - "Region": "us-east-2", + "Failover": "PRIMARY", "TTL": 60, "ResourceRecords": [ { @@ -2370,7 +2285,7 @@ "Name": "example.com", "Type": "A", "SetIdentifier": "Oregon region", - "Region": "us-west-2", + "Failover": "SECONDARY", "TTL": 60, "ResourceRecords": [ { @@ -2385,7 +2300,7 @@ }, "output": { "ChangeInfo": { - "Comment": "EC2 instances for example.com", + "Comment": "Failover configuration for example.com", "Id": "/change/C2682N5HXP0BZ4", "Status": "PENDING", "SubmittedAt": "2017-02-10T01:36:41.958Z" @@ -2393,20 +2308,22 @@ } }, { - "title": "To create latency alias resource record sets", - "documentation": "The following example creates two latency alias resource record sets that route traffic for example.com to ELB load balancers. Requests are routed either to the Ohio region or the Oregon region, depending on the latency between the user and those regions.", + "title": "To create geolocation alias resource record sets", + "documentation": "The following example creates four geolocation alias resource record sets that route traffic to ELB load balancers. 
Traffic is routed to one of four IP addresses, for North America (NA), for South America (SA), for Europe (EU), and for all other locations (*).", "input": { "HostedZoneId": "Z3M3LMPEXAMPLE", "ChangeBatch": { - "Comment": "ELB load balancers for example.com", + "Comment": "Geolocation alias configuration for example.com", "Changes": [ { "Action": "CREATE", "ResourceRecordSet": { "Name": "example.com", "Type": "A", - "SetIdentifier": "Ohio region", - "Region": "us-east-2", + "SetIdentifier": "North America", + "GeoLocation": { + "ContinentCode": "NA" + }, "AliasTarget": { "HostedZoneId": "Z3AADJGX6KTTL2", "DNSName": "example-com-123456789.us-east-2.elb.amazonaws.com ", @@ -2419,11 +2336,45 @@ "ResourceRecordSet": { "Name": "example.com", "Type": "A", - "SetIdentifier": "Oregon region", - "Region": "us-west-2", + "SetIdentifier": "South America", + "GeoLocation": { + "ContinentCode": "SA" + }, "AliasTarget": { - "HostedZoneId": "Z1H1FL5HABSF5", - "DNSName": "example-com-987654321.us-west-2.elb.amazonaws.com ", + "HostedZoneId": "Z2P70J7HTTTPLU", + "DNSName": "example-com-234567890.sa-east-1.elb.amazonaws.com ", + "EvaluateTargetHealth": true + } + } + }, + { + "Action": "CREATE", + "ResourceRecordSet": { + "Name": "example.com", + "Type": "A", + "SetIdentifier": "Europe", + "GeoLocation": { + "ContinentCode": "EU" + }, + "AliasTarget": { + "HostedZoneId": "Z215JYRZR1TBD5", + "DNSName": "example-com-234567890.eu-central-1.elb.amazonaws.com ", + "EvaluateTargetHealth": true + } + } + }, + { + "Action": "CREATE", + "ResourceRecordSet": { + "Name": "example.com", + "Type": "A", + "SetIdentifier": "Other locations", + "GeoLocation": { + "CountryCode": "*" + }, + "AliasTarget": { + "HostedZoneId": "Z1LMS91P8CMLE5", + "DNSName": "example-com-234567890.ap-southeast-1.elb.amazonaws.com ", "EvaluateTargetHealth": true } } @@ -2433,7 +2384,7 @@ }, "output": { "ChangeInfo": { - "Comment": "ELB load balancers for example.com", + "Comment": "Geolocation alias configuration for 
example.com", "Id": "/change/C2682N5HXP0BZ4", "Status": "PENDING", "SubmittedAt": "2017-02-10T01:36:41.958Z" @@ -2441,27 +2392,28 @@ } }, { - "title": "To create failover resource record sets", - "documentation": "The following example creates primary and secondary failover resource record sets that route traffic to EC2 instances. Traffic is generally routed to the primary resource, in the Ohio region. If that resource is unavailable, traffic is routed to the secondary resource, in the Oregon region.", + "title": "To create geolocation resource record sets", + "documentation": "The following example creates four geolocation resource record sets that use IPv4 addresses to route traffic to resources such as web servers running on EC2 instances. Traffic is routed to one of four IP addresses, for North America (NA), for South America (SA), for Europe (EU), and for all other locations (*).", "input": { "HostedZoneId": "Z3M3LMPEXAMPLE", "ChangeBatch": { - "Comment": "Failover configuration for example.com", + "Comment": "Geolocation configuration for example.com", "Changes": [ { "Action": "CREATE", "ResourceRecordSet": { "Name": "example.com", "Type": "A", - "SetIdentifier": "Ohio region", - "Failover": "PRIMARY", + "SetIdentifier": "North America", + "GeoLocation": { + "ContinentCode": "NA" + }, "TTL": 60, "ResourceRecords": [ { "Value": "192.0.2.44" } - ], - "HealthCheckId": "abcdef11-2222-3333-4444-555555fedcba" + ] } }, { @@ -2469,15 +2421,50 @@ "ResourceRecordSet": { "Name": "example.com", "Type": "A", - "SetIdentifier": "Oregon region", - "Failover": "SECONDARY", + "SetIdentifier": "South America", + "GeoLocation": { + "ContinentCode": "SA" + }, "TTL": 60, "ResourceRecords": [ { "Value": "192.0.2.45" } - ], - "HealthCheckId": "abcdef66-7777-8888-9999-000000fedcba" + ] + } + }, + { + "Action": "CREATE", + "ResourceRecordSet": { + "Name": "example.com", + "Type": "A", + "SetIdentifier": "Europe", + "GeoLocation": { + "ContinentCode": "EU" + }, + "TTL": 60, + 
"ResourceRecords": [ + { + "Value": "192.0.2.46" + } + ] + } + }, + { + "Action": "CREATE", + "ResourceRecordSet": { + "Name": "example.com", + "Type": "A", + "SetIdentifier": "Other locations", + "GeoLocation": { + "CountryCode": "*" + }, + "TTL": 60, + "ResourceRecords": [ + { + "Value": "192.0.2.47" + } + ] } } ] @@ -2485,7 +2472,7 @@ }, "output": { "ChangeInfo": { - "Comment": "Failover configuration for example.com", + "Comment": "Geolocation configuration for example.com", "Id": "/change/C2682N5HXP0BZ4", "Status": "PENDING", "SubmittedAt": "2017-02-10T01:36:41.958Z" @@ -2493,12 +2480,12 @@ } }, { - "title": "To create failover alias resource record sets", - "documentation": "The following example creates primary and secondary failover alias resource record sets that route traffic to ELB load balancers. Traffic is generally routed to the primary resource, in the Ohio region. If that resource is unavailable, traffic is routed to the secondary resource, in the Oregon region.", + "title": "To create latency alias resource record sets", + "documentation": "The following example creates two latency alias resource record sets that route traffic for example.com to ELB load balancers. 
Requests are routed either to the Ohio region or the Oregon region, depending on the latency between the user and those regions.", "input": { "HostedZoneId": "Z3M3LMPEXAMPLE", "ChangeBatch": { - "Comment": "Failover alias configuration for example.com", + "Comment": "ELB load balancers for example.com", "Changes": [ { "Action": "CREATE", @@ -2506,7 +2493,7 @@ "Name": "example.com", "Type": "A", "SetIdentifier": "Ohio region", - "Failover": "PRIMARY", + "Region": "us-east-2", "AliasTarget": { "HostedZoneId": "Z3AADJGX6KTTL2", "DNSName": "example-com-123456789.us-east-2.elb.amazonaws.com ", @@ -2520,7 +2507,7 @@ "Name": "example.com", "Type": "A", "SetIdentifier": "Oregon region", - "Failover": "SECONDARY", + "Region": "us-west-2", "AliasTarget": { "HostedZoneId": "Z1H1FL5HABSF5", "DNSName": "example-com-987654321.us-west-2.elb.amazonaws.com ", @@ -2533,7 +2520,7 @@ }, "output": { "ChangeInfo": { - "Comment": "Failover alias configuration for example.com", + "Comment": "ELB load balancers for example.com", "Id": "/change/C2682N5HXP0BZ4", "Status": "PENDING", "SubmittedAt": "2017-02-10T01:36:41.958Z" @@ -2541,28 +2528,27 @@ } }, { - "title": "To create geolocation resource record sets", - "documentation": "The following example creates four geolocation resource record sets that use IPv4 addresses to route traffic to resources such as web servers running on EC2 instances. Traffic is routed to one of four IP addresses, for North America (NA), for South America (SA), for Europe (EU), and for all other locations (*).", + "title": "To create latency resource record sets", + "documentation": "The following example creates two latency resource record sets that route traffic to EC2 instances. 
Traffic for example.com is routed either to the Ohio region or the Oregon region, depending on the latency between the user and those regions.", "input": { "HostedZoneId": "Z3M3LMPEXAMPLE", "ChangeBatch": { - "Comment": "Geolocation configuration for example.com", + "Comment": "EC2 instances for example.com", "Changes": [ { "Action": "CREATE", "ResourceRecordSet": { "Name": "example.com", "Type": "A", - "SetIdentifier": "North America", - "GeoLocation": { - "ContinentCode": "NA" - }, + "SetIdentifier": "Ohio region", + "Region": "us-east-2", "TTL": 60, "ResourceRecords": [ { "Value": "192.0.2.44" } - ] + ], + "HealthCheckId": "abcdef11-2222-3333-4444-555555fedcba" } }, { @@ -2570,48 +2556,46 @@ "ResourceRecordSet": { "Name": "example.com", "Type": "A", - "SetIdentifier": "South America", - "GeoLocation": { - "ContinentCode": "SA" - }, + "SetIdentifier": "Oregon region", + "Region": "us-west-2", "TTL": 60, "ResourceRecords": [ { "Value": "192.0.2.45" } - ] + ], + "HealthCheckId": "abcdef66-7777-8888-9999-000000fedcba" } - }, + } + ] + } + }, + "output": { + "ChangeInfo": { + "Comment": "EC2 instances for example.com", + "Id": "/change/C2682N5HXP0BZ4", + "Status": "PENDING", + "SubmittedAt": "2017-02-10T01:36:41.958Z" + } + } + }, + { + "title": "To create a basic resource record set", + "documentation": "The following example creates a resource record set that routes Internet traffic to a resource with an IP address of 192.0.2.44.", + "input": { + "HostedZoneId": "Z3M3LMPEXAMPLE", + "ChangeBatch": { + "Comment": "Web server for example.com", + "Changes": [ { "Action": "CREATE", "ResourceRecordSet": { "Name": "example.com", "Type": "A", - "SetIdentifier": "Europe", - "GeoLocation": { - "ContinentCode": "EU" - }, "TTL": 60, "ResourceRecords": [ { - "Value": "192.0.2.46" - } - ] - } - }, - { - "Action": "CREATE", - "ResourceRecordSet": { - "Name": "example.com", - "Type": "A", - "SetIdentifier": "Other locations", - "GeoLocation": { - "CountryCode": "*" - }, - "TTL": 
60, - "ResourceRecords": [ - { - "Value": "192.0.2.47" + "Value": "192.0.2.44" } ] } @@ -2621,7 +2605,7 @@ }, "output": { "ChangeInfo": { - "Comment": "Geolocation configuration for example.com", + "Comment": "Web server for example.com", "Id": "/change/C2682N5HXP0BZ4", "Status": "PENDING", "SubmittedAt": "2017-02-10T01:36:41.958Z" @@ -2629,22 +2613,20 @@ } }, { - "title": "To create geolocation alias resource record sets", - "documentation": "The following example creates four geolocation alias resource record sets that route traffic to ELB load balancers. Traffic is routed to one of four IP addresses, for North America (NA), for South America (SA), for Europe (EU), and for all other locations (*).", + "title": "To create weighted alias resource record sets", + "documentation": "The following example creates two weighted alias resource record sets that route traffic to ELB load balancers. The resource with a Weight of 100 will get 1/3rd of traffic (100/100+200), and the other resource will get the rest of the traffic for example.com.", "input": { "HostedZoneId": "Z3M3LMPEXAMPLE", "ChangeBatch": { - "Comment": "Geolocation alias configuration for example.com", + "Comment": "ELB load balancers for example.com", "Changes": [ { "Action": "CREATE", "ResourceRecordSet": { "Name": "example.com", "Type": "A", - "SetIdentifier": "North America", - "GeoLocation": { - "ContinentCode": "NA" - }, + "SetIdentifier": "Ohio region", + "Weight": 100, "AliasTarget": { "HostedZoneId": "Z3AADJGX6KTTL2", "DNSName": "example-com-123456789.us-east-2.elb.amazonaws.com ", @@ -2657,31 +2639,49 @@ "ResourceRecordSet": { "Name": "example.com", "Type": "A", - "SetIdentifier": "South America", - "GeoLocation": { - "ContinentCode": "SA" - }, + "SetIdentifier": "Oregon region", + "Weight": 200, "AliasTarget": { - "HostedZoneId": "Z2P70J7HTTTPLU", - "DNSName": "example-com-234567890.sa-east-1.elb.amazonaws.com ", + "HostedZoneId": "Z1H1FL5HABSF5", + "DNSName": 
"example-com-987654321.us-west-2.elb.amazonaws.com ", "EvaluateTargetHealth": true } } - }, + } + ] + } + }, + "output": { + "ChangeInfo": { + "Comment": "ELB load balancers for example.com", + "Id": "/change/C2682N5HXP0BZ4", + "Status": "PENDING", + "SubmittedAt": "2017-02-10T01:36:41.958Z" + } + } + }, + { + "title": "To create weighted resource record sets", + "documentation": "The following example creates two weighted resource record sets. The resource with a Weight of 100 will get 1/3rd of traffic (100/100+200), and the other resource will get the rest of the traffic for example.com.", + "input": { + "HostedZoneId": "Z3M3LMPEXAMPLE", + "ChangeBatch": { + "Comment": "Web servers for example.com", + "Changes": [ { "Action": "CREATE", "ResourceRecordSet": { "Name": "example.com", "Type": "A", - "SetIdentifier": "Europe", - "GeoLocation": { - "ContinentCode": "EU" - }, - "AliasTarget": { - "HostedZoneId": "Z215JYRZR1TBD5", - "DNSName": "example-com-234567890.eu-central-1.elb.amazonaws.com ", - "EvaluateTargetHealth": true - } + "SetIdentifier": "Seattle data center", + "Weight": 100, + "TTL": 60, + "ResourceRecords": [ + { + "Value": "192.0.2.44" + } + ], + "HealthCheckId": "abcdef11-2222-3333-4444-555555fedcba" } }, { @@ -2689,15 +2689,15 @@ "ResourceRecordSet": { "Name": "example.com", "Type": "A", - "SetIdentifier": "Other locations", - "GeoLocation": { - "CountryCode": "*" - }, - "AliasTarget": { - "HostedZoneId": "Z1LMS91P8CMLE5", - "DNSName": "example-com-234567890.ap-southeast-1.elb.amazonaws.com ", - "EvaluateTargetHealth": true - } + "SetIdentifier": "Portland data center", + "Weight": 200, + "TTL": 60, + "ResourceRecords": [ + { + "Value": "192.0.2.45" + } + ], + "HealthCheckId": "abcdef66-7777-8888-9999-000000fedcba" } } ] @@ -2705,7 +2705,7 @@ }, "output": { "ChangeInfo": { - "Comment": "Geolocation alias configuration for example.com", + "Comment": "Web servers for example.com", "Id": "/change/C2682N5HXP0BZ4", "Status": "PENDING", "SubmittedAt": 
"2017-02-10T01:36:41.958Z" @@ -6383,7 +6383,22 @@ "method": "GET", "uri": "/2013-04-01/hostedzone/{Id}", "code": 200 - } + }, + "smithy.test#smokeTests": [ + { + "id": "GetHostedZoneFailure", + "params": { + "Id": "fake-zone" + }, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.route53#GetHostedZoneCount": { @@ -8547,7 +8562,20 @@ "outputToken": "NextMarker", "items": "HostedZones", "pageSize": "MaxItems" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListHostedZonesSuccess", + "params": {}, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.route53#ListHostedZonesByName": { diff --git a/models/s3.json b/models/s3.json index 9bfb1d7b40..bbef1c6b28 100644 --- a/models/s3.json +++ b/models/s3.json @@ -60,7 +60,7 @@ } ], "traits": { - "smithy.api#documentation": "

This operation aborts a multipart upload. After a multipart upload is aborted, no\n additional parts can be uploaded using that upload ID. The storage consumed by any\n previously uploaded parts will be freed. However, if any part uploads are currently in\n progress, those part uploads might or might not succeed. As a result, it might be necessary\n to abort a given multipart upload multiple times in order to completely free all storage\n consumed by all parts.

\n

To verify that all parts have been removed and prevent getting charged for the part\n storage, you should call the ListParts API operation and ensure that\n the parts list is empty.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to AbortMultipartUpload:

\n ", + "smithy.api#documentation": "

This operation aborts a multipart upload. After a multipart upload is aborted, no\n additional parts can be uploaded using that upload ID. The storage consumed by any\n previously uploaded parts will be freed. However, if any part uploads are currently in\n progress, those part uploads might or might not succeed. As a result, it might be necessary\n to abort a given multipart upload multiple times in order to completely free all storage\n consumed by all parts.

\n

To verify that all parts have been removed and prevent getting charged for the part\n storage, you should call the ListParts API operation and ensure that\n the parts list is empty.

\n \n
    \n
  • \n

    \n Directory buckets - \n If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. \n To delete these in-progress multipart uploads, use the\n ListMultipartUploads operation to list the in-progress multipart\n uploads in the bucket and use the AbortMultupartUpload operation to\n abort all the in-progress multipart uploads.\n

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to AbortMultipartUpload:

\n ", "smithy.api#examples": [ { "title": "To abort a multipart upload", @@ -18694,7 +18694,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a copy of an object that is already stored in Amazon S3.

\n \n

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.

\n
\n

You can copy individual objects between general purpose buckets, between directory buckets, and \n between general purpose buckets and directory buckets.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n

Both the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable \n or disable a Region for standalone accounts in the\n Amazon Web Services Account Management Guide.

\n \n

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request error. For more information, see Transfer\n Acceleration.

\n
\n
\n
Authentication and authorization
\n
\n

All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including\n x-amz-copy-source, must be signed. For more information, see REST Authentication.

\n

\n Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the \n temporary security credentials through the CreateSession API operation.

\n

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

\n
\n
Permissions
\n
\n

You must have\n read access to the source object and write\n access to the destination bucket.

\n
    \n
  • \n

    \n General purpose bucket permissions -\n You must have permissions in an IAM policy based on the source and destination\n bucket types in a CopyObject operation.

    \n
      \n
    • \n

      If the source object is in a general purpose bucket, you must have\n \n s3:GetObject\n \n permission to read the source object that is being copied.

      \n
    • \n
    • \n

      If the destination bucket is a general purpose bucket, you must have\n \n s3:PutObject\n \n permission to write the object copy to the destination bucket.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in a CopyObject operation.

    \n
      \n
    • \n

      If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession\n permission in\n the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

      \n
    • \n
    • \n

      If the copy destination is a directory bucket, you must have the \n s3express:CreateSession\n permission in the\n Action element of a policy to write the object\n to the destination. The s3express:SessionMode condition\n key can't be set to ReadOnly on the copy destination bucket.

      \n
    • \n
    \n

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Response and special errors
\n
\n

When the request is an HTTP 1.1 request, the response is chunk encoded. When\n the request is not an HTTP 1.1 request, the response would not contain the\n Content-Length. You always need to read the entire response body\n to check if the copy succeeds.

\n
    \n
  • \n

    If the copy is successful, you receive a response with information about the copied\n object.

    \n
  • \n
  • \n

    A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. A 200 OK response can contain either a success or an error.

    \n
      \n
    • \n

      If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error.

      \n
    • \n
    • \n

      If the error occurs during the copy operation, the error response is\n embedded in the 200 OK response. For example, in a cross-region copy, you \n may encounter throttling and receive a 200 OK response. \n For more information, see Resolve \n the Error 200 response when copying objects to Amazon S3. \n The 200 OK status code means the copy was accepted, but \n it doesn't mean the copy is complete. Another example is \n when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. \n You must stay connected to Amazon S3 until the entire response is successfully received and processed.

      \n

      If you call this API operation directly, make\n sure to design your application to parse the content of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throw an exception (or, for the SDKs that don't use exceptions, they return an \n error).

      \n
    • \n
    \n
  • \n
\n
\n
Charge
\n
\n

The copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see\n Amazon S3 pricing.

\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CopyObject:

\n ", + "smithy.api#documentation": "

Creates a copy of an object that is already stored in Amazon S3.

\n \n

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.

\n
\n

You can copy individual objects between general purpose buckets, between directory buckets, and \n between general purpose buckets and directory buckets.

\n \n
    \n
  • \n

    Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.

    \n
  • \n
\n
\n

Both the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable \n or disable a Region for standalone accounts in the\n Amazon Web Services Account Management Guide.

\n \n

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request error. For more information, see Transfer\n Acceleration.

\n
\n
\n
Authentication and authorization
\n
\n

All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including\n x-amz-copy-source, must be signed. For more information, see REST Authentication.

\n

\n Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the \n temporary security credentials through the CreateSession API operation.

\n

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

\n
\n
Permissions
\n
\n

You must have\n read access to the source object and write\n access to the destination bucket.

\n
    \n
  • \n

    \n General purpose bucket permissions -\n You must have permissions in an IAM policy based on the source and destination\n bucket types in a CopyObject operation.

    \n
      \n
    • \n

      If the source object is in a general purpose bucket, you must have\n \n s3:GetObject\n \n permission to read the source object that is being copied.

      \n
    • \n
    • \n

      If the destination bucket is a general purpose bucket, you must have\n \n s3:PutObject\n \n permission to write the object copy to the destination bucket.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in a CopyObject operation.

    \n
      \n
    • \n

      If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession\n permission in\n the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

      \n
    • \n
    • \n

      If the copy destination is a directory bucket, you must have the \n s3express:CreateSession\n permission in the\n Action element of a policy to write the object\n to the destination. The s3express:SessionMode condition\n key can't be set to ReadOnly on the copy destination bucket.

      \n
    • \n
    \n

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Response and special errors
\n
\n

When the request is an HTTP 1.1 request, the response is chunk encoded. When\n the request is not an HTTP 1.1 request, the response would not contain the\n Content-Length. You always need to read the entire response body\n to check if the copy succeeds.

\n
    \n
  • \n

    If the copy is successful, you receive a response with information about the copied\n object.

    \n
  • \n
  • \n

    A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. A 200 OK response can contain either a success or an error.

    \n
      \n
    • \n

      If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error.

      \n
    • \n
    • \n

      If the error occurs during the copy operation, the error response is\n embedded in the 200 OK response. For example, in a cross-region copy, you \n may encounter throttling and receive a 200 OK response. \n For more information, see Resolve \n the Error 200 response when copying objects to Amazon S3. \n The 200 OK status code means the copy was accepted, but \n it doesn't mean the copy is complete. Another example is \n when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. \n You must stay connected to Amazon S3 until the entire response is successfully received and processed.

      \n

      If you call this API operation directly, make\n sure to design your application to parse the content of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throw an exception (or, for the SDKs that don't use exceptions, they return an \n error).

      \n
    • \n
    \n
  • \n
\n
\n
Charge
\n
\n

The copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see\n Amazon S3 pricing.

\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CopyObject:

\n ", "smithy.api#examples": [ { "title": "To copy an object", @@ -19891,7 +19891,7 @@ } }, "traits": { - "smithy.api#documentation": "

The container element for specifying the default Object Lock retention settings for new\n objects placed in the specified bucket.

\n \n
    \n
  • \n

    The DefaultRetention settings require both a mode and a\n period.

    \n
  • \n
  • \n

    The DefaultRetention period can be either Days or\n Years but you must select one. You cannot specify\n Days and Years at the same time.

    \n
  • \n
\n
" + "smithy.api#documentation": "

The container element for optionally specifying the default Object Lock retention settings for new\n objects placed in the specified bucket.

\n \n
    \n
  • \n

    The DefaultRetention settings require both a mode and a\n period.

    \n
  • \n
  • \n

    The DefaultRetention period can be either Days or\n Years but you must select one. You cannot specify\n Days and Years at the same time.

    \n
  • \n
\n
" } }, "com.amazonaws.s3#Delete": { @@ -21258,7 +21258,7 @@ } }, "traits": { - "smithy.api#documentation": "

Requests Amazon S3 to encode the object keys in the response and specifies the encoding\n method to use. An object key can contain any Unicode character; however, the XML 1.0 parser\n cannot parse some characters, such as characters with an ASCII value from 0 to 10. For\n characters that are not supported in XML 1.0, you can add this parameter to request that\n Amazon S3 encode the keys in the response.

" + "smithy.api#documentation": "

Encoding type used by Amazon S3 to encode the object keys in the response.\n Responses are encoded only in UTF-8. An object key can contain any Unicode character.\n However, the XML 1.0 parser can't parse certain characters, such as characters with an\n ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this\n parameter to request that Amazon S3 encode the keys in the response. For more information about\n characters to avoid in object key names, see Object key naming\n guidelines.

\n \n

When using the URL encoding type, non-ASCII characters that are used in an object's\n key name will be percent-encoded according to UTF-8 code values. For example, the object\n test_file(3).png will appear as\n test_file%283%29.png.

\n
" } }, "com.amazonaws.s3#Encryption": { @@ -21299,7 +21299,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies encryption-related information for an Amazon S3 bucket that is a destination for\n replicated objects.

" + "smithy.api#documentation": "

Specifies encryption-related information for an Amazon S3 bucket that is a destination for\n replicated objects.

\n \n

If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.

\n
" } }, "com.amazonaws.s3#End": { @@ -24638,7 +24638,7 @@ } ], "traits": { - "smithy.api#documentation": "

You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission\n to access it.

\n

If the bucket does not exist or you do not have permission to access it, the\n HEAD request returns a generic 400 Bad Request, 403\n Forbidden or 404 Not Found code. A message body is not included, so\n you cannot determine the exception beyond these HTTP response codes.

\n \n

\n Directory buckets - You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Authentication and authorization
\n
\n

All HeadBucket requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including\n x-amz-copy-source, must be signed. For more information, see REST Authentication.

\n

\n Directory bucket - You must use IAM credentials to authenticate and authorize your access to the HeadBucket API operation, instead of using the \n temporary security credentials through the CreateSession API operation.

\n

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

\n
\n
Permissions
\n
\n

\n \n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
", + "smithy.api#documentation": "

You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission\n to access it.

\n \n

If the bucket does not exist or you do not have permission to access it, the\n HEAD request returns a generic 400 Bad Request, 403\n Forbidden or 404 Not Found code. A message body is not included, so\n you cannot determine the exception beyond these HTTP response codes.

\n
\n
\n
Authentication and authorization
\n
\n

\n General purpose buckets - Request to public buckets that grant the s3:ListBucket permission publicly do not need to be signed. All other HeadBucket requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including\n x-amz-copy-source, must be signed. For more information, see REST Authentication.

\n

\n Directory buckets - You must use IAM credentials to authenticate and authorize your access to the HeadBucket API operation, instead of using the \n temporary security credentials through the CreateSession API operation.

\n

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

\n
\n
Permissions
\n
\n

\n \n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n \n

You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
", "smithy.api#examples": [ { "title": "To determine if bucket exists", @@ -24705,14 +24705,14 @@ "BucketRegion": { "target": "com.amazonaws.s3#Region", "traits": { - "smithy.api#documentation": "

The Region that the bucket is located in.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

The Region that the bucket is located in.

", "smithy.api#httpHeader": "x-amz-bucket-region" } }, "AccessPointAlias": { "target": "com.amazonaws.s3#AccessPointAlias", "traits": { - "smithy.api#documentation": "

Indicates whether the bucket name used in the request is an access point alias.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Indicates whether the bucket name used in the request is an access point alias.

\n \n

For directory buckets, the value of this field is false.

\n
", "smithy.api#httpHeader": "x-amz-access-point-alias" } } @@ -24761,7 +24761,7 @@ } ], "traits": { - "smithy.api#documentation": "

The HEAD operation retrieves metadata from an object without returning the\n object itself. This operation is useful if you're interested only in an object's metadata.

\n

A HEAD request has the same options as a GET operation on an\n object. The response is identical to the GET response except that there is no\n response body. Because of this, if the HEAD request generates an error, it\n returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not\n Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. \n It's not possible to retrieve the exact exception of these error codes.

\n

Request headers are limited to 8 KB in size. For more information, see Common\n Request Headers.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n

\n
    \n
  • \n

    \n General purpose bucket permissions - To\n use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation.\n For more information, see Actions, resources, and condition\n keys for Amazon S3 in the Amazon S3\n User Guide.

    \n

    If the object you request doesn't exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    \n
      \n
    • \n

      If you have the s3:ListBucket permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found error.

      \n
    • \n
    • \n

      If you don’t have the s3:ListBucket permission, Amazon S3 returns\n an HTTP status code 403 Forbidden error.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Encryption
\n
\n \n

Encryption request headers, like x-amz-server-side-encryption,\n should not be sent for HEAD requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. \n If you include this header in a HEAD request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

\n
\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

\n
    \n
  • \n

    \n x-amz-server-side-encryption-customer-algorithm\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key-MD5\n

    \n
  • \n
\n

For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.

\n \n

\n Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
\n
\n
Versioning
\n
\n
    \n
  • \n

    If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

    \n
  • \n
  • \n

    If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.

    \n
  • \n
\n \n
    \n
  • \n

    \n Directory buckets - Delete marker is not supported by directory buckets.

    \n
  • \n
  • \n

    \n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null \n to the versionId query parameter in the request.

    \n
  • \n
\n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following actions are related to HeadObject:

\n ", + "smithy.api#documentation": "

The HEAD operation retrieves metadata from an object without returning the\n object itself. This operation is useful if you're interested only in an object's metadata.

\n \n

A HEAD request has the same options as a GET operation on an\n object. The response is identical to the GET response except that there is no\n response body. Because of this, if the HEAD request generates an error, it\n returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not\n Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. \n It's not possible to retrieve the exact exception of these error codes.

\n
\n

Request headers are limited to 8 KB in size. For more information, see Common\n Request Headers.

\n
\n
Permissions
\n
\n

\n
    \n
  • \n

    \n General purpose bucket permissions - To\n use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation.\n For more information, see Actions, resources, and condition\n keys for Amazon S3 in the Amazon S3\n User Guide.

    \n

    If the object you request doesn't exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    \n
      \n
    • \n

      If you have the s3:ListBucket permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found error.

      \n
    • \n
    • \n

      If you don’t have the s3:ListBucket permission, Amazon S3 returns\n an HTTP status code 403 Forbidden error.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Encryption
\n
\n \n

Encryption request headers, like x-amz-server-side-encryption,\n should not be sent for HEAD requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. \n If you include this header in a HEAD request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

\n
\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

\n
    \n
  • \n

    \n x-amz-server-side-encryption-customer-algorithm\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key-MD5\n

    \n
  • \n
\n

For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.

\n \n

\n Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
\n
\n
Versioning
\n
\n
    \n
  • \n

    If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

    \n
  • \n
  • \n

    If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.

    \n
  • \n
\n \n
    \n
  • \n

    \n Directory buckets - Delete marker is not supported by directory buckets.

    \n
  • \n
  • \n

    \n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null \n to the versionId query parameter in the request.

    \n
  • \n
\n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n \n

For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
\n

The following actions are related to HeadObject:

\n ", "smithy.api#http": { "method": "HEAD", "uri": "/{Bucket}/{Key+}", @@ -26404,7 +26404,7 @@ "com.amazonaws.s3#ListBuckets": { "type": "operation", "input": { - "target": "smithy.api#Unit" + "target": "com.amazonaws.s3#ListBucketsRequest" }, "output": { "target": "com.amazonaws.s3#ListBucketsOutput" @@ -26441,6 +26441,12 @@ "method": "GET", "uri": "/?x-id=ListBuckets", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "ContinuationToken", + "outputToken": "ContinuationToken", + "items": "Buckets", + "pageSize": "MaxBuckets" } } }, @@ -26458,6 +26464,12 @@ "traits": { "smithy.api#documentation": "

The owner of the buckets listed.

" } + }, + "ContinuationToken": { + "target": "com.amazonaws.s3#NextToken", + "traits": { + "smithy.api#documentation": "

\n ContinuationToken is included in the\n response when there are more buckets that can be listed with pagination. The next ListBuckets request to Amazon S3 can be continued with this ContinuationToken. ContinuationToken is obfuscated and is not a real bucket name.

" + } } }, "traits": { @@ -26465,6 +26477,28 @@ "smithy.api#xmlName": "ListAllMyBucketsResult" } }, + "com.amazonaws.s3#ListBucketsRequest": { + "type": "structure", + "members": { + "MaxBuckets": { + "target": "com.amazonaws.s3#MaxBuckets", + "traits": { + "smithy.api#documentation": "

Maximum number of buckets to be returned in the response. When the number is more than the count of buckets that are owned by an Amazon Web Services account, all the buckets owned by the account are returned in the response.

", + "smithy.api#httpQuery": "max-buckets" + } + }, + "ContinuationToken": { + "target": "com.amazonaws.s3#Token", + "traits": { + "smithy.api#documentation": "

\n ContinuationToken indicates to Amazon S3 that the list of buckets is being continued in\n this account with a token. ContinuationToken is obfuscated and is not a real\n bucket name. You can use this ContinuationToken for pagination of the list results.

\n

Length Constraints: Minimum length of 0. Maximum length of 1024.

\n

Required: No.

", + "smithy.api#httpQuery": "continuation-token" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.s3#ListDirectoryBuckets": { "type": "operation", "input": { @@ -26519,7 +26553,7 @@ "ContinuationToken": { "target": "com.amazonaws.s3#DirectoryBucketToken", "traits": { - "smithy.api#documentation": "

\n ContinuationToken indicates to Amazon S3 that the list is being continued on\n this bucket with a token. ContinuationToken is obfuscated and is not a real\n key. You can use this ContinuationToken for pagination of the list results.

", + "smithy.api#documentation": "

\n ContinuationToken indicates to Amazon S3 that the list is being continued on buckets in this account with a token. ContinuationToken is obfuscated and is not a real\n bucket name. You can use this ContinuationToken for the pagination of the list results.

", "smithy.api#httpQuery": "continuation-token" } }, @@ -26544,7 +26578,7 @@ "target": "com.amazonaws.s3#ListMultipartUploadsOutput" }, "traits": { - "smithy.api#documentation": "

This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a\n multipart upload that has been initiated by the CreateMultipartUpload request, but\n has not yet been completed or aborted.

\n \n

\n Directory buckets - \n If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.\n

\n
\n

The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart\n uploads is also the default\n value. You can further limit the number of uploads in a response by specifying the\n max-uploads request parameter. If there are more than 1,000 multipart uploads that \n satisfy your ListMultipartUploads request, the response returns an IsTruncated element\n with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. \n To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. \n In these requests, include two query parameters: key-marker and upload-id-marker. \n Set the value of key-marker to the NextKeyMarker value from the previous response. \n Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response.

\n \n

\n Directory buckets - The upload-id-marker element and \n the NextUploadIdMarker element aren't supported by directory buckets. \n To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response.

\n
\n

For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload in the Amazon S3\n User Guide.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Sorting of multipart uploads in response
\n
\n
    \n
  • \n

    \n General purpose bucket - In the ListMultipartUploads response, the multipart uploads are sorted based on two criteria:

    \n
      \n
    • \n

      Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.

      \n
    • \n
    • \n

      Time-based sorting - For uploads that share the same object key, \n they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys. \n \n

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to ListMultipartUploads:

\n ", + "smithy.api#documentation": "

This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a\n multipart upload that has been initiated by the CreateMultipartUpload request, but\n has not yet been completed or aborted.

\n \n

\n Directory buckets - \n If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. \n To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart\n uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads.\n

\n
\n

The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart\n uploads is also the default\n value. You can further limit the number of uploads in a response by specifying the\n max-uploads request parameter. If there are more than 1,000 multipart uploads that \n satisfy your ListMultipartUploads request, the response returns an IsTruncated element\n with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. \n To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. \n In these requests, include two query parameters: key-marker and upload-id-marker. \n Set the value of key-marker to the NextKeyMarker value from the previous response. \n Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response.

\n \n

\n Directory buckets - The upload-id-marker element and \n the NextUploadIdMarker element aren't supported by directory buckets. \n To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response.

\n
\n

For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload in the Amazon S3\n User Guide.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Sorting of multipart uploads in response
\n
\n
    \n
  • \n

    \n General purpose bucket - In the ListMultipartUploads response, the multipart uploads are sorted based on two criteria:

    \n
      \n
    • \n

      Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.

      \n
    • \n
    • \n

      Time-based sorting - For uploads that share the same object key, \n they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys. \n \n

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to ListMultipartUploads:

\n ", "smithy.api#examples": [ { "title": "To list in-progress multipart uploads on a bucket", @@ -27081,7 +27115,7 @@ "EncodingType": { "target": "com.amazonaws.s3#EncodingType", "traits": { - "smithy.api#documentation": "

Encoding type used by Amazon S3 to encode object keys in the response. If using\n url, non-ASCII characters used in an object's key name will be URL encoded.\n For example, the object test_file(3).png will appear as\n test_file%283%29.png.

" + "smithy.api#documentation": "

Encoding type used by Amazon S3 to encode the object keys in the response.\n Responses are encoded only in UTF-8. An object key can contain any Unicode character.\n However, the XML 1.0 parser can't parse certain characters, such as characters with an\n ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this\n parameter to request that Amazon S3 encode the keys in the response. For more information about\n characters to avoid in object key names, see Object key naming\n guidelines.

\n \n

When using the URL encoding type, non-ASCII characters that are used in an object's\n key name will be percent-encoded according to UTF-8 code values. For example, the object\n test_file(3).png will appear as\n test_file%283%29.png.

\n
" } }, "RequestCharged": { @@ -27187,7 +27221,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns some or all (up to 1,000) of the objects in a bucket with each request. You can\n use the request parameters as selection criteria to return a subset of the objects in a\n bucket. A 200 OK response can contain valid or invalid XML. Make sure to\n design your application to parse the contents of the response and handle it appropriately.\n \n For more information about listing objects, see Listing object keys\n programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform\n the s3:ListBucket action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Sorting order of returned objects
\n
\n
    \n
  • \n

    \n General purpose bucket - For general purpose buckets, ListObjectsV2 returns objects in lexicographical order based on their key names.

    \n
  • \n
  • \n

    \n Directory bucket - For directory buckets, ListObjectsV2 does not return objects in lexicographical order.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n \n

This section describes the latest revision of this action. We recommend that you use\n this revised API operation for application development. For backward compatibility, Amazon S3\n continues to support the prior version of this API operation, ListObjects.

\n
\n

The following operations are related to ListObjectsV2:

\n ", + "smithy.api#documentation": "

Returns some or all (up to 1,000) of the objects in a bucket with each request. You can\n use the request parameters as selection criteria to return a subset of the objects in a\n bucket. A 200 OK response can contain valid or invalid XML. Make sure to\n design your application to parse the contents of the response and handle it appropriately.\n \n For more information about listing objects, see Listing object keys\n programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.

\n \n
    \n
  • \n

    \n General purpose bucket - For general purpose buckets, ListObjectsV2 doesn't return prefixes that are related only to in-progress multipart uploads.

    \n
  • \n
  • \n

    \n Directory buckets - \n For directory buckets, ListObjectsV2 response includes the prefixes that are related only to in-progress multipart uploads.\n

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform\n the s3:ListBucket action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Sorting order of returned objects
\n
\n
    \n
  • \n

    \n General purpose bucket - For general purpose buckets, ListObjectsV2 returns objects in lexicographical order based on their key names.

    \n
  • \n
  • \n

    \n Directory bucket - For directory buckets, ListObjectsV2 does not return objects in lexicographical order.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n \n

This section describes the latest revision of this action. We recommend that you use\n this revised API operation for application development. For backward compatibility, Amazon S3\n continues to support the prior version of this API operation, ListObjects.

\n
\n

The following operations are related to ListObjectsV2:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?list-type=2", @@ -27313,7 +27347,7 @@ "EncodingType": { "target": "com.amazonaws.s3#EncodingType", "traits": { - "smithy.api#documentation": "

Encoding type used by Amazon S3 to encode object keys in the response. If using\n url, non-ASCII characters used in an object's key name will be URL encoded.\n For example, the object test_file(3).png will appear as\n test_file%283%29.png.

", + "smithy.api#documentation": "

Encoding type used by Amazon S3 to encode the object keys in the response.\n Responses are encoded only in UTF-8. An object key can contain any Unicode character.\n However, the XML 1.0 parser can't parse certain characters, such as characters with an\n ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this\n parameter to request that Amazon S3 encode the keys in the response. For more information about\n characters to avoid in object key names, see Object key naming\n guidelines.

\n \n

When using the URL encoding type, non-ASCII characters that are used in an object's\n key name will be percent-encoded according to UTF-8 code values. For example, the object\n test_file(3).png will appear as\n test_file%283%29.png.

\n
", "smithy.api#httpQuery": "encoding-type" } }, @@ -27710,6 +27744,15 @@ "com.amazonaws.s3#MaxAgeSeconds": { "type": "integer" }, + "com.amazonaws.s3#MaxBuckets": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, "com.amazonaws.s3#MaxDirectoryBuckets": { "type": "integer", "traits": { @@ -28935,7 +28978,7 @@ "PartitionDateSource": { "target": "com.amazonaws.s3#PartitionDateSource", "traits": { - "smithy.api#documentation": "

Specifies the partition date source for the partitioned prefix. PartitionDateSource can be EventTime or DeliveryTime.

" + "smithy.api#documentation": "

Specifies the partition date source for the partitioned prefix.\n PartitionDateSource can be EventTime or\n DeliveryTime.

\n

For DeliveryTime, the time in the log file names corresponds to the\n delivery time for the log files.

\n

For EventTime, The logs delivered are for a specific day only. The year,\n month, and day correspond to the day on which the event occurred, and the hour, minutes and\n seconds are set to 00 in the key.

" } } }, @@ -29120,7 +29163,7 @@ "RestrictPublicBuckets": { "target": "com.amazonaws.s3#Setting", "traits": { - "smithy.api#documentation": "

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting\n this element to TRUE restricts access to this bucket to only Amazon Web Service principals and authorized users within this account if the bucket has\n a public policy.

\n

Enabling this setting doesn't affect previously stored bucket policies, except that\n public and cross-account access within any public bucket policy, including non-public\n delegation to specific accounts, is blocked.

", + "smithy.api#documentation": "

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting\n this element to TRUE restricts access to this bucket to only Amazon Web Services service principals and authorized users within this account if the bucket has\n a public policy.

\n

Enabling this setting doesn't affect previously stored bucket policies, except that\n public and cross-account access within any public bucket policy, including non-public\n delegation to specific accounts, is blocked.

", "smithy.api#xmlName": "RestrictPublicBuckets" } } @@ -29519,7 +29562,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This action uses the encryption subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.

\n

By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket\n Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.

\n \n

This action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).

\n
\n

To use this operation, you must have permission to perform the\n s3:PutEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

The following operations are related to PutBucketEncryption:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This action uses the encryption subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.

\n

By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket\n Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.

\n \n

If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.

\n

Also, this action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).

\n
\n

To use this operation, you must have permission to perform the\n s3:PutEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

The following operations are related to PutBucketEncryption:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?encryption", @@ -30502,7 +30545,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Sets the versioning state of an existing bucket.

\n

You can set the versioning state with one of the following values:

\n

\n Enabled—Enables versioning for the objects in the\n bucket. All objects added to the bucket receive a unique version ID.

\n

\n Suspended—Disables versioning for the objects in the\n bucket. All objects added to the bucket receive the version ID null.

\n

If the versioning state has never been set on a bucket, it has no versioning state; a\n GetBucketVersioning request does not return a versioning state value.

\n

In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner\n and want to enable MFA Delete in the bucket versioning configuration, you must include the\n x-amz-mfa request header and the Status and the\n MfaDelete request elements in a request to set the versioning state of the\n bucket.

\n \n

If you have an object expiration lifecycle configuration in your non-versioned bucket\n and you want to maintain the same permanent delete behavior when you enable versioning,\n you must add a noncurrent expiration policy. The noncurrent expiration lifecycle\n configuration will manage the deletes of the noncurrent object versions in the\n version-enabled bucket. (A version-enabled bucket maintains one current and zero or more\n noncurrent object versions.) For more information, see Lifecycle and Versioning.

\n
\n

The following operations are related to PutBucketVersioning:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n \n

When you enable versioning on a bucket for the first time, it might take a short\n amount of time for the change to be fully propagated. We recommend that you wait for 15\n minutes after enabling versioning before issuing write operations\n (PUT\n or\n DELETE)\n on objects in the bucket.

\n
\n

Sets the versioning state of an existing bucket.

\n

You can set the versioning state with one of the following values:

\n

\n Enabled—Enables versioning for the objects in the\n bucket. All objects added to the bucket receive a unique version ID.

\n

\n Suspended—Disables versioning for the objects in the\n bucket. All objects added to the bucket receive the version ID null.

\n

If the versioning state has never been set on a bucket, it has no versioning state; a\n GetBucketVersioning request does not return a versioning state value.

\n

In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner\n and want to enable MFA Delete in the bucket versioning configuration, you must include the\n x-amz-mfa request header and the Status and the\n MfaDelete request elements in a request to set the versioning state of the\n bucket.

\n \n

If you have an object expiration lifecycle configuration in your non-versioned bucket\n and you want to maintain the same permanent delete behavior when you enable versioning,\n you must add a noncurrent expiration policy. The noncurrent expiration lifecycle\n configuration will manage the deletes of the noncurrent object versions in the\n version-enabled bucket. (A version-enabled bucket maintains one current and zero or more\n noncurrent object versions.) For more information, see Lifecycle and Versioning.

\n
\n

The following operations are related to PutBucketVersioning:

\n ", "smithy.api#examples": [ { "title": "Set versioning configuration on a bucket", @@ -31945,7 +31988,7 @@ "Payload": { "target": "com.amazonaws.s3#Body", "traits": { - "smithy.api#documentation": "

The byte array of partial, one or more result records.

", + "smithy.api#documentation": "

The byte array of partial, one or more result records. S3 Select doesn't guarantee that\n a record will be self-contained in one record frame. To ensure continuous streaming of\n data, S3 Select might split the same record across multiple record frames instead of\n aggregating the results in memory. Some S3 clients (for example, the SDK for Java) handle this behavior by creating a ByteStream out of the response by\n default. Other clients might not handle this behavior by default. In those cases, you must\n aggregate the results on the client side and parse the response.

", "smithy.api#eventPayload": {} } } @@ -33018,7 +33061,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the default server-side encryption to apply to new objects in the bucket. If a\n PUT Object request doesn't specify any server-side encryption, this default encryption will\n be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates\n an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted\n with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more\n information, see PUT Bucket encryption in\n the Amazon S3 API Reference.

" + "smithy.api#documentation": "

Describes the default server-side encryption to apply to new objects in the bucket. If a\n PUT Object request doesn't specify any server-side encryption, this default encryption will\n be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates\n an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted\n with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more\n information, see PUT Bucket encryption in\n the Amazon S3 API Reference.

\n \n

If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.

\n
" } }, "com.amazonaws.s3#ServerSideEncryptionConfiguration": { @@ -33055,7 +33098,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the default server-side encryption configuration.

" + "smithy.api#documentation": "

Specifies the default server-side encryption configuration.

\n \n

If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.

\n
" } }, "com.amazonaws.s3#ServerSideEncryptionRules": { diff --git a/models/sagemaker.json b/models/sagemaker.json index 09b64b049f..984fbb79a2 100644 --- a/models/sagemaker.json +++ b/models/sagemaker.json @@ -2586,6 +2586,18 @@ "smithy.api#documentation": "

Lists a summary of the properties of an association. An association is an entity that\n links other lineage or experiment entities. An example would be an association between a\n training job and a model.

" } }, + "com.amazonaws.sagemaker#AssumableRoleArns": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#RoleArn" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, "com.amazonaws.sagemaker#AsyncInferenceClientConfig": { "type": "structure", "members": { @@ -3052,7 +3064,7 @@ "target": "com.amazonaws.sagemaker#AutoMLAlgorithms", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The selection of algorithms trained on your dataset to generate the model candidates for\n an Autopilot job.

\n
    \n
  • \n

    \n For the tabular problem type TabularJobConfig:\n

    \n \n

    Selected algorithms must belong to the list corresponding to the training mode\n set in AutoMLJobConfig.Mode (ENSEMBLING or\n HYPERPARAMETER_TUNING). Choose a minimum of 1 algorithm.

    \n
    \n
      \n
    • \n

      In ENSEMBLING mode:

      \n
        \n
      • \n

        \"catboost\"

        \n
      • \n
      • \n

        \"extra-trees\"

        \n
      • \n
      • \n

        \"fastai\"

        \n
      • \n
      • \n

        \"lightgbm\"

        \n
      • \n
      • \n

        \"linear-learner\"

        \n
      • \n
      • \n

        \"nn-torch\"

        \n
      • \n
      • \n

        \"randomforest\"

        \n
      • \n
      • \n

        \"xgboost\"

        \n
      • \n
      \n
    • \n
    • \n

      In HYPERPARAMETER_TUNING mode:

      \n
        \n
      • \n

        \"linear-learner\"

        \n
      • \n
      • \n

        \"mlp\"

        \n
      • \n
      • \n

        \"xgboost\"

        \n
      • \n
      \n
    • \n
    \n
  • \n
  • \n

    \n For the time-series forecasting problem type TimeSeriesForecastingJobConfig:\n

    \n
      \n
    • \n

      Choose your algorithms from this list.

      \n
        \n
      • \n

        \"cnn-qr\"

        \n
      • \n
      • \n

        \"deepar\"

        \n
      • \n
      • \n

        \"prophet\"

        \n
      • \n
      • \n

        \"arima\"

        \n
      • \n
      • \n

        \"npts\"

        \n
      • \n
      • \n

        \"ets\"

        \n
      • \n
      \n
    • \n
    \n
  • \n
", + "smithy.api#documentation": "

The selection of algorithms trained on your dataset to generate the model candidates for\n an Autopilot job.

\n
    \n
  • \n

    \n For the tabular problem type\n TabularJobConfig:\n

    \n \n

    Selected algorithms must belong to the list corresponding to the training mode\n set in AutoMLJobConfig.Mode (ENSEMBLING or\n HYPERPARAMETER_TUNING). Choose a minimum of 1 algorithm.

    \n
    \n
      \n
    • \n

      In ENSEMBLING mode:

      \n
        \n
      • \n

        \"catboost\"

        \n
      • \n
      • \n

        \"extra-trees\"

        \n
      • \n
      • \n

        \"fastai\"

        \n
      • \n
      • \n

        \"lightgbm\"

        \n
      • \n
      • \n

        \"linear-learner\"

        \n
      • \n
      • \n

        \"nn-torch\"

        \n
      • \n
      • \n

        \"randomforest\"

        \n
      • \n
      • \n

        \"xgboost\"

        \n
      • \n
      \n
    • \n
    • \n

      In HYPERPARAMETER_TUNING mode:

      \n
        \n
      • \n

        \"linear-learner\"

        \n
      • \n
      • \n

        \"mlp\"

        \n
      • \n
      • \n

        \"xgboost\"

        \n
      • \n
      \n
    • \n
    \n
  • \n
  • \n

    \n For the time-series forecasting problem type\n TimeSeriesForecastingJobConfig:\n

    \n
      \n
    • \n

      Choose your algorithms from this list.

      \n
        \n
      • \n

        \"cnn-qr\"

        \n
      • \n
      • \n

        \"deepar\"

        \n
      • \n
      • \n

        \"prophet\"

        \n
      • \n
      • \n

        \"arima\"

        \n
      • \n
      • \n

        \"npts\"

        \n
      • \n
      • \n

        \"ets\"

        \n
      • \n
      \n
    • \n
    \n
  • \n
", "smithy.api#required": {} } } @@ -3186,7 +3198,7 @@ "AlgorithmsConfig": { "target": "com.amazonaws.sagemaker#AutoMLAlgorithmsConfig", "traits": { - "smithy.api#documentation": "

Stores the configuration information for the selection of algorithms trained on tabular data.

\n

The list of available algorithms to choose from depends on the training mode set in\n \n TabularJobConfig.Mode\n .

\n
    \n
  • \n

    \n AlgorithmsConfig should not be set if the training mode is set on AUTO.

    \n
  • \n
  • \n

    When AlgorithmsConfig is provided, one AutoMLAlgorithms\n attribute must be set and one only.

    \n

    If the list of algorithms provided as values for AutoMLAlgorithms is\n empty, CandidateGenerationConfig uses the full set of algorithms for the\n given training mode.

    \n
  • \n
  • \n

    When AlgorithmsConfig is not provided,\n CandidateGenerationConfig uses the full set of algorithms for the\n given training mode.

    \n
  • \n
\n

For the list of all algorithms per problem type and training mode, see \n AutoMLAlgorithmConfig.

\n

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" + "smithy.api#documentation": "

Stores the configuration information for the selection of algorithms trained on tabular\n data.

\n

The list of available algorithms to choose from depends on the training mode set in\n \n TabularJobConfig.Mode\n .

\n
    \n
  • \n

    \n AlgorithmsConfig should not be set if the training mode is set on\n AUTO.

    \n
  • \n
  • \n

    When AlgorithmsConfig is provided, one AutoMLAlgorithms\n attribute must be set and one only.

    \n

    If the list of algorithms provided as values for AutoMLAlgorithms is\n empty, CandidateGenerationConfig uses the full set of algorithms for the\n given training mode.

    \n
  • \n
  • \n

    When AlgorithmsConfig is not provided,\n CandidateGenerationConfig uses the full set of algorithms for the\n given training mode.

    \n
  • \n
\n

For the list of all algorithms per problem type and training mode, see \n AutoMLAlgorithmConfig.

\n

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" } } }, @@ -3295,6 +3307,20 @@ } } }, + "com.amazonaws.sagemaker#AutoMLComputeConfig": { + "type": "structure", + "members": { + "EmrServerlessComputeConfig": { + "target": "com.amazonaws.sagemaker#EmrServerlessComputeConfig", + "traits": { + "smithy.api#documentation": "

The configuration for using EMR Serverless\n to run the AutoML job V2.

\n

To allow your AutoML job V2 to automatically initiate a remote job on EMR Serverless\n when additional compute resources are needed to process large datasets, you need to provide\n an EmrServerlessComputeConfig object, which includes an\n ExecutionRoleARN attribute, to the AutoMLComputeConfig of the\n AutoML job V2 input request.

\n

By seamlessly transitioning to EMR Serverless when required, the AutoML job can handle\n datasets that would otherwise exceed the initially provisioned resources, without any\n manual intervention from you.

\n

EMR Serverless is available for the tabular and time series problem types. We\n recommend setting up this option for tabular datasets larger than 5 GB and time series\n datasets larger than 30 GB.

" + } + } + }, + "traits": { + "smithy.api#documentation": "\n

This data type is intended for use exclusively by SageMaker Canvas and cannot be used in\n other contexts at the moment.

\n
\n

Specifies the compute configuration for an AutoML job V2.

" + } + }, "com.amazonaws.sagemaker#AutoMLContainerDefinition": { "type": "structure", "members": { @@ -4171,7 +4197,7 @@ "target": "com.amazonaws.sagemaker#S3Uri", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon S3 output path. Must be 128 characters or less.

", + "smithy.api#documentation": "

The Amazon S3 output path. Must be 512 characters or less.

", "smithy.api#required": {} } } @@ -5055,7 +5081,7 @@ "AlgorithmsConfig": { "target": "com.amazonaws.sagemaker#AutoMLAlgorithmsConfig", "traits": { - "smithy.api#documentation": "

Your Autopilot job trains a default set of algorithms on your dataset. For tabular and\n time-series data, you can customize the algorithm list by selecting a subset of algorithms\n for your problem type.

\n

\n AlgorithmsConfig stores the customized selection of algorithms to train on\n your data.

\n
    \n
  • \n

    \n For the tabular problem type TabularJobConfig,\n the list of available algorithms to choose from depends on the training mode set\n in \n AutoMLJobConfig.Mode\n .

    \n
      \n
    • \n

      \n AlgorithmsConfig should not be set when the training mode\n AutoMLJobConfig.Mode is set to AUTO.

      \n
    • \n
    • \n

      When AlgorithmsConfig is provided, one\n AutoMLAlgorithms attribute must be set and one only.

      \n

      If the list of algorithms provided as values for\n AutoMLAlgorithms is empty,\n CandidateGenerationConfig uses the full set of algorithms for\n the given training mode.

      \n
    • \n
    • \n

      When AlgorithmsConfig is not provided,\n CandidateGenerationConfig uses the full set of algorithms for\n the given training mode.

      \n
    • \n
    \n

    For the list of all algorithms per training mode, see \n AlgorithmConfig.

    \n

    For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.

    \n
  • \n
  • \n

    \n For the time-series forecasting problem type TimeSeriesForecastingJobConfig,\n choose your algorithms from the list provided in\n \n AlgorithmConfig.

    \n

    For more information on each algorithm, see the Algorithms support for time-series forecasting section in the Autopilot developer guide.

    \n
      \n
    • \n

      When AlgorithmsConfig is provided, one\n AutoMLAlgorithms attribute must be set and one only.

      \n

      If the list of algorithms provided as values for\n AutoMLAlgorithms is empty,\n CandidateGenerationConfig uses the full set of algorithms for\n time-series forecasting.

      \n
    • \n
    • \n

      When AlgorithmsConfig is not provided,\n CandidateGenerationConfig uses the full set of algorithms for\n time-series forecasting.

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

Your Autopilot job trains a default set of algorithms on your dataset. For tabular and\n time-series data, you can customize the algorithm list by selecting a subset of algorithms\n for your problem type.

\n

\n AlgorithmsConfig stores the customized selection of algorithms to train on\n your data.

\n
    \n
  • \n

    \n For the tabular problem type\n TabularJobConfig, the list of available algorithms to\n choose from depends on the training mode set in \n AutoMLJobConfig.Mode\n .

    \n
      \n
    • \n

      \n AlgorithmsConfig should not be set when the training mode\n AutoMLJobConfig.Mode is set to AUTO.

      \n
    • \n
    • \n

      When AlgorithmsConfig is provided, one\n AutoMLAlgorithms attribute must be set and one only.

      \n

      If the list of algorithms provided as values for\n AutoMLAlgorithms is empty,\n CandidateGenerationConfig uses the full set of algorithms for\n the given training mode.

      \n
    • \n
    • \n

      When AlgorithmsConfig is not provided,\n CandidateGenerationConfig uses the full set of algorithms for\n the given training mode.

      \n
    • \n
    \n

    For the list of all algorithms per training mode, see \n AlgorithmConfig.

    \n

    For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.

    \n
  • \n
  • \n

    \n For the time-series forecasting problem type\n TimeSeriesForecastingJobConfig, choose your algorithms\n from the list provided in \n AlgorithmConfig.

    \n

    For more information on each algorithm, see the Algorithms\n support for time-series forecasting section in the Autopilot developer\n guide.

    \n
      \n
    • \n

      When AlgorithmsConfig is provided, one\n AutoMLAlgorithms attribute must be set and one only.

      \n

      If the list of algorithms provided as values for\n AutoMLAlgorithms is empty,\n CandidateGenerationConfig uses the full set of algorithms for\n time-series forecasting.

      \n
    • \n
    • \n

      When AlgorithmsConfig is not provided,\n CandidateGenerationConfig uses the full set of algorithms for\n time-series forecasting.

      \n
    • \n
    \n
  • \n
" } } }, @@ -5242,6 +5268,12 @@ "traits": { "smithy.api#documentation": "

The generative AI settings for the SageMaker Canvas application.

" } + }, + "EmrServerlessSettings": { + "target": "com.amazonaws.sagemaker#EmrServerlessSettings", + "traits": { + "smithy.api#documentation": "

The settings for running Amazon EMR Serverless data processing jobs in SageMaker Canvas.

" + } } }, "traits": { @@ -8907,7 +8939,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

\n \n

We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

" + "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

\n

An AutoML job in SageMaker is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.

\n

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker developer guide.

\n \n

We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

" } }, "com.amazonaws.sagemaker#CreateAutoMLJobRequest": { @@ -9019,7 +9051,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

\n \n

\n CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob\n and DescribeAutoMLJob which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

For the list of available problem types supported by CreateAutoMLJobV2, see\n AutoMLProblemTypeConfig.

\n

You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

" + "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

\n

An AutoML job in SageMaker is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.

\n

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker developer guide.

\n

AutoML jobs V2 support various problem types such as regression, binary, and multiclass\n classification with tabular data, text and image classification, time-series forecasting,\n and fine-tuning of large language models (LLMs) for text generation.

\n \n

\n CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob\n and DescribeAutoMLJob which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

For the list of available problem types supported by CreateAutoMLJobV2, see\n AutoMLProblemTypeConfig.

\n

You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

" } }, "com.amazonaws.sagemaker#CreateAutoMLJobV2Request": { @@ -9094,6 +9126,12 @@ "traits": { "smithy.api#documentation": "

This structure specifies how to split the data into train and validation\n datasets.

\n

The validation and training datasets must contain the same headers. For jobs created by\n calling CreateAutoMLJob, the validation dataset must be less than 2 GB in\n size.

\n \n

This attribute must not be set for the time-series forecasting problem type, as Autopilot\n automatically splits the input dataset into training and validation sets.

\n
" } + }, + "AutoMLComputeConfig": { + "target": "com.amazonaws.sagemaker#AutoMLComputeConfig", + "traits": { + "smithy.api#documentation": "

Specifies the compute configuration for the AutoML job V2.

" + } } }, "traits": { @@ -18091,6 +18129,12 @@ "traits": { "smithy.api#documentation": "

Returns the security configuration for traffic encryption or Amazon VPC\n settings.

" } + }, + "AutoMLComputeConfig": { + "target": "com.amazonaws.sagemaker#AutoMLComputeConfig", + "traits": { + "smithy.api#documentation": "

The compute configuration used for the AutoML job V2.

" + } } }, "traits": { @@ -27145,6 +27189,62 @@ "smithy.api#pattern": "^\\d+$" } }, + "com.amazonaws.sagemaker#EmrServerlessComputeConfig": { + "type": "structure", + "members": { + "ExecutionRoleARN": { + "target": "com.amazonaws.sagemaker#RoleArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ARN of the IAM role granting the AutoML job V2 the necessary\n permissions access policies to list, connect to, or manage EMR Serverless jobs. For\n detailed information about the required permissions of this role, see \"How to configure\n AutoML to initiate a remote job on EMR Serverless for large datasets\" in Create a regression or classification job for tabular data using the AutoML API\n or Create an AutoML job for time-series forecasting using the API.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "\n

This data type is intended for use exclusively by SageMaker Canvas and cannot be used in\n other contexts at the moment.

\n
\n

Specifies the compute configuration for the EMR Serverless job.

" + } + }, + "com.amazonaws.sagemaker#EmrServerlessSettings": { + "type": "structure", + "members": { + "ExecutionRoleArn": { + "target": "com.amazonaws.sagemaker#RoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services IAM role that is assumed for\n running Amazon EMR Serverless jobs in SageMaker Canvas. This role should have the necessary\n permissions to read and write data attached and a trust relationship with\n EMR Serverless.

" + } + }, + "Status": { + "target": "com.amazonaws.sagemaker#FeatureStatus", + "traits": { + "smithy.api#documentation": "

Describes whether Amazon EMR Serverless job capabilities are enabled or disabled in the SageMaker\n Canvas application.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The settings for running Amazon EMR Serverless jobs in SageMaker Canvas.

" + } + }, + "com.amazonaws.sagemaker#EmrSettings": { + "type": "structure", + "members": { + "AssumableRoleArns": { + "target": "com.amazonaws.sagemaker#AssumableRoleArns", + "traits": { + "smithy.api#documentation": "

An array of Amazon Resource Names (ARNs) of the IAM roles that the execution role of\n SageMaker can assume for performing operations or tasks related to Amazon EMR clusters or Amazon EMR\n Serverless applications. These roles define the permissions and access policies required\n when performing Amazon EMR-related operations, such as listing, connecting to, or terminating\n Amazon EMR clusters or Amazon EMR Serverless applications. They are typically used in\n cross-account access scenarios, where the Amazon EMR resources (clusters or serverless\n applications) are located in a different Amazon Web Services account than the SageMaker\n domain.

" + } + }, + "ExecutionRoleArns": { + "target": "com.amazonaws.sagemaker#ExecutionRoleArns", + "traits": { + "smithy.api#documentation": "

An array of Amazon Resource Names (ARNs) of the IAM roles used by the Amazon EMR cluster instances\n or job execution environments to access other Amazon Web Services services and resources needed during the \n runtime of your Amazon EMR or Amazon EMR Serverless workloads, such as Amazon S3 for data access, Amazon CloudWatch for logging, or other\n Amazon Web Services services based on the particular workload requirements.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration parameters that specify the IAM roles assumed by the execution role of \n SageMaker (assumable roles) and the cluster instances or job execution environments \n (execution roles or runtime roles) to manage and access resources required for running Amazon EMR\n clusters or Amazon EMR Serverless applications.

" + } + }, "com.amazonaws.sagemaker#EnableCapture": { "type": "boolean" }, @@ -27349,6 +27449,20 @@ } } }, + "com.amazonaws.sagemaker#EndpointConfigStepMetadata": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.sagemaker#EndpointConfigArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the endpoint configuration used in the step.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Metadata for an endpoint configuration step.

" + } + }, "com.amazonaws.sagemaker#EndpointConfigSummary": { "type": "structure", "members": { @@ -27730,6 +27844,20 @@ } } }, + "com.amazonaws.sagemaker#EndpointStepMetadata": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.sagemaker#EndpointArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the endpoint in the step.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Metadata for an endpoint step.

" + } + }, "com.amazonaws.sagemaker#EndpointSummary": { "type": "structure", "members": { @@ -27918,6 +28046,18 @@ } } }, + "com.amazonaws.sagemaker#ExecutionRoleArns": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#RoleArn" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, "com.amazonaws.sagemaker#ExecutionRoleIdentityConfig": { "type": "enum", "members": { @@ -35095,6 +35235,12 @@ "traits": { "smithy.api#documentation": "

A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.

" } + }, + "EmrSettings": { + "target": "com.amazonaws.sagemaker#EmrSettings", + "traits": { + "smithy.api#documentation": "

The configuration parameters that specify the IAM roles assumed by the execution role of \n SageMaker (assumable roles) and the cluster instances or job execution environments \n (execution roles or runtime roles) to manage and access resources required for running Amazon EMR\n clusters or Amazon EMR Serverless applications.

" + } } }, "traits": { @@ -44263,6 +44409,12 @@ "traits": { "smithy.api#enumValue": "Projects" } + }, + "INFERENCE_OPTIMIZATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InferenceOptimization" + } } } }, @@ -50842,6 +50994,18 @@ "traits": { "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AutoML job that was run by this step.

" } + }, + "Endpoint": { + "target": "com.amazonaws.sagemaker#EndpointStepMetadata", + "traits": { + "smithy.api#documentation": "

The endpoint that was invoked during this step execution.

" + } + }, + "EndpointConfig": { + "target": "com.amazonaws.sagemaker#EndpointConfigStepMetadata", + "traits": { + "smithy.api#documentation": "

The endpoint configuration used to create an endpoint during this step execution.

" + } } }, "traits": { @@ -52105,9 +52269,7 @@ "LocalPath": { "target": "com.amazonaws.sagemaker#ProcessingLocalPath", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The local path of a directory where you want Amazon SageMaker to upload its contents to Amazon S3. \n LocalPath is an absolute path to a directory containing output files. \n This directory will be created by the platform and exist when your container's \n entrypoint is invoked.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The local path of a directory where you want Amazon SageMaker to upload its contents to Amazon S3. \n LocalPath is an absolute path to a directory containing output files. \n This directory will be created by the platform and exist when your container's \n entrypoint is invoked.

" } }, "S3UploadMode": { @@ -52290,7 +52452,7 @@ "InferenceAmiVersion": { "target": "com.amazonaws.sagemaker#ProductionVariantInferenceAmiVersion", "traits": { - "smithy.api#documentation": "

Specifies an option from a collection of preconfigured Amazon Machine Image (AMI)\n images. Each image is configured by Amazon Web Services with a set of software and driver\n versions. Amazon Web Services optimizes these configurations for different machine\n learning workloads.

\n

By selecting an AMI version, you can ensure that your inference environment is\n compatible with specific software requirements, such as CUDA driver versions, Linux\n kernel versions, or Amazon Web Services Neuron driver versions.

" + "smithy.api#documentation": "

Specifies an option from a collection of preconfigured Amazon Machine Image (AMI)\n images. Each image is configured by Amazon Web Services with a set of software and driver\n versions. Amazon Web Services optimizes these configurations for different machine\n learning workloads.

\n

By selecting an AMI version, you can ensure that your inference environment is\n compatible with specific software requirements, such as CUDA driver versions, Linux\n kernel versions, or Amazon Web Services Neuron driver versions.

\n

The AMI version names, and their configurations, are the following:

\n
\n
al2-ami-sagemaker-inference-gpu-2
\n
\n
    \n
  • \n

    Accelerator: GPU

    \n
  • \n
  • \n

    NVIDIA driver version: 535.54.03

    \n
  • \n
  • \n

    CUDA driver version: 12.2

    \n
  • \n
  • \n

    Supported instance types: ml.g4dn.*, ml.g5.*, ml.g6.*, ml.p3.*,\n ml.p4d.*, ml.p4de.*, ml.p5.*

    \n
  • \n
\n
\n
" } } }, diff --git a/models/sesv2.json b/models/sesv2.json index 1820531dca..98594547b2 100644 --- a/models/sesv2.json +++ b/models/sesv2.json @@ -8231,8 +8231,7 @@ "UseCaseDescription": { "target": "com.amazonaws.sesv2#UseCaseDescription", "traits": { - "smithy.api#documentation": "

A description of the types of email that you plan to send.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A description of the types of email that you plan to send.

" } }, "AdditionalContactEmailAddresses": { @@ -12462,8 +12461,11 @@ "com.amazonaws.sesv2#UseCaseDescription": { "type": "string", "traits": { + "smithy.api#deprecated": { + "message": "Use case description is optional and deprecated" + }, "smithy.api#length": { - "min": 1, + "min": 0, "max": 5000 }, "smithy.api#sensitive": {} diff --git a/models/sfn.json b/models/sfn.json index ec94cc0dae..09f12b100c 100644 --- a/models/sfn.json +++ b/models/sfn.json @@ -158,7 +158,7 @@ "name": "states" }, "aws.protocols#awsJson1_0": {}, - "smithy.api#documentation": "Step Functions\n

Step Functions is a service that lets you coordinate the components of distributed applications\n and microservices using visual workflows.

\n

You can use Step Functions to build applications from individual components, each of which performs\n a discrete function, or task, allowing you to scale and change\n applications quickly. Step Functions provides a console that helps visualize the components of your\n application as a series of steps. Step Functions automatically triggers and tracks each step, and\n retries steps when there are errors, so your application executes predictably and in the right\n order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any\n issues.

\n

Step Functions manages operations and underlying infrastructure to ensure your application is\n available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has\n access to Amazon Web Services. You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API.\n For more information about Step Functions, see the \n Step Functions Developer Guide\n .

\n \n

If you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution and specify its parameter as StateMachineArn.

\n
", + "smithy.api#documentation": "Step Functions\n

Step Functions coordinates the components of distributed applications\n and microservices using visual workflows.

\n

You can use Step Functions to build applications from individual components, each of which performs\n a discrete function, or task, allowing you to scale and change\n applications quickly. Step Functions provides a console that helps visualize the components of your\n application as a series of steps. Step Functions automatically triggers and tracks each step, and\n retries steps when there are errors, so your application executes predictably and in the right\n order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any\n issues.

\n

Step Functions manages operations and underlying infrastructure to ensure your application is\n available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has\n access to Amazon Web Services. You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API.\n For more information about Step Functions, see the \n Step Functions Developer Guide\n .

\n \n

If you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution and specify its parameter as StateMachineArn.

\n
", "smithy.api#title": "AWS Step Functions", "smithy.api#xmlNamespace": { "uri": "http://swf.amazonaws.com/doc/2015-07-20/" @@ -1176,6 +1176,18 @@ } } }, + "com.amazonaws.sfn#ActivityAlreadyExists": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.sfn#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

Activity already exists. EncryptionConfiguration may not be updated.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.sfn#ActivityDoesNotExist": { "type": "structure", "members": { @@ -1522,12 +1534,24 @@ "target": "com.amazonaws.sfn#CreateActivityOutput" }, "errors": [ + { + "target": "com.amazonaws.sfn#ActivityAlreadyExists" + }, { "target": "com.amazonaws.sfn#ActivityLimitExceeded" }, + { + "target": "com.amazonaws.sfn#InvalidEncryptionConfiguration" + }, { "target": "com.amazonaws.sfn#InvalidName" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#TooManyTags" } @@ -1552,6 +1576,12 @@ "traits": { "smithy.api#documentation": "

The list of tags to add to a resource.

\n

An array of key-value pairs. For more information, see Using\n Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User\n Guide, and Controlling Access Using IAM\n Tags.

\n

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

" } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.sfn#EncryptionConfiguration", + "traits": { + "smithy.api#documentation": "

Settings to configure server-side encryption.

" + } } }, "traits": { @@ -1598,6 +1628,9 @@ { "target": "com.amazonaws.sfn#InvalidDefinition" }, + { + "target": "com.amazonaws.sfn#InvalidEncryptionConfiguration" + }, { "target": "com.amazonaws.sfn#InvalidLoggingConfiguration" }, @@ -1607,6 +1640,12 @@ { "target": "com.amazonaws.sfn#InvalidTracingConfiguration" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#StateMachineAlreadyExists" }, @@ -1627,7 +1666,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a state machine. A state machine consists of a collection of states that can do\n work (Task states), determine to which states to transition next\n (Choice states), stop an execution with an error (Fail states),\n and so on. State machines are specified using a JSON-based, structured language. For more\n information, see Amazon States\n Language in the Step Functions User Guide.

\n

If you set the publish parameter of this API action to true, it\n publishes version 1 as the first revision of the state machine.

\n \n

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

\n
\n \n

\n CreateStateMachine is an idempotent API. Subsequent requests won’t create a\n duplicate resource if it was already created. CreateStateMachine's idempotency\n check is based on the state machine name, definition,\n type, LoggingConfiguration, and\n TracingConfiguration. The check is also based on the publish and versionDescription parameters. If a following request has a different\n roleArn or tags, Step Functions will ignore these differences and treat\n it as an idempotent request of the previous. In this case, roleArn and\n tags will not be updated, even if they are different.

\n
", + "smithy.api#documentation": "

Creates a state machine. A state machine consists of a collection of states that can do\n work (Task states), determine to which states to transition next\n (Choice states), stop an execution with an error (Fail states),\n and so on. State machines are specified using a JSON-based, structured language. For more\n information, see Amazon States\n Language in the Step Functions User Guide.

\n

If you set the publish parameter of this API action to true, it\n publishes version 1 as the first revision of the state machine.

\n

\n For additional control over security, you can encrypt your data using a customer-managed key for Step Functions state machines. You can configure a symmetric KMS key and data key reuse period when creating or updating a State Machine. The execution history and state machine definition will be encrypted with the key applied to the State Machine.\n

\n \n

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

\n
\n \n

\n CreateStateMachine is an idempotent API. Subsequent requests won’t create a\n duplicate resource if it was already created. CreateStateMachine's idempotency\n check is based on the state machine name, definition,\n type, LoggingConfiguration,\n TracingConfiguration, and EncryptionConfiguration. The check is also based on the publish and versionDescription parameters. If a following request has a different\n roleArn or tags, Step Functions will ignore these differences and treat\n it as an idempotent request of the previous. In this case, roleArn and\n tags will not be updated, even if they are different.

\n
", "smithy.api#idempotent": {} } }, @@ -1776,6 +1815,12 @@ "traits": { "smithy.api#documentation": "

Sets description about the state machine version. You can only set the description if the publish parameter is set to true. Otherwise, if you set versionDescription, but publish to false, this API action throws ValidationException.

" } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.sfn#EncryptionConfiguration", + "traits": { + "smithy.api#documentation": "

Settings to configure server-side encryption.

" + } } }, "traits": { @@ -1969,7 +2014,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a state machine version. After\n you delete a version, you can't call StartExecution using that version's ARN\n or use\n the\n version with a state machine alias.

\n \n

Deleting a state machine version won't terminate its in-progress executions.

\n
\n \n

You can't delete a state machine version currently referenced by one or more aliases. Before you delete a version, you must either delete the aliases or update them to point to another state machine version.

\n
\n

\n Related operations:\n

\n " + "smithy.api#documentation": "

Deletes a state machine version. After\n you delete a version, you can't call StartExecution using that version's ARN\n or use the version with a state machine alias.

\n \n

Deleting a state machine version won't terminate its in-progress executions.

\n
\n \n

You can't delete a state machine version currently referenced by one or more aliases. Before you delete a version, you must either delete the aliases or update them to point to another state machine version.

\n
\n

\n Related operations:\n

\n " } }, "com.amazonaws.sfn#DeleteStateMachineVersionInput": { @@ -2052,6 +2097,12 @@ "smithy.api#documentation": "

The date the activity is created.

", "smithy.api#required": {} } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.sfn#EncryptionConfiguration", + "traits": { + "smithy.api#documentation": "

Settings for configured server-side encryption.

" + } } }, "traits": { @@ -2072,6 +2123,15 @@ }, { "target": "com.amazonaws.sfn#InvalidArn" + }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" } ], "traits": { @@ -2087,6 +2147,12 @@ "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the execution to describe.

", "smithy.api#required": {} } + }, + "includedData": { + "target": "com.amazonaws.sfn#IncludedData", + "traits": { + "smithy.api#documentation": "

If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call DescribeStateMachine API with includedData = METADATA_ONLY to get a successful response without the encrypted definition.

" + } } }, "traits": { @@ -2358,6 +2424,15 @@ { "target": "com.amazonaws.sfn#InvalidArn" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#StateMachineDoesNotExist" } @@ -2462,10 +2537,19 @@ }, { "target": "com.amazonaws.sfn#InvalidArn" + }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" } ], "traits": { - "smithy.api#documentation": "

Provides information about a state machine's definition, its execution role ARN, and\n configuration. If a Map Run dispatched the execution, this action returns the Map Run\n Amazon Resource Name (ARN) in the response.\n The\n state machine returned is the state machine associated with the\n Map Run.

\n \n

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

\n
\n

This API action is not supported by EXPRESS state machines.

" + "smithy.api#documentation": "

Provides information about a state machine's definition, its execution role ARN, and\n configuration. If a Map Run dispatched the execution, this action returns the Map Run\n Amazon Resource Name (ARN) in the response. The state machine returned is the state machine associated with the\n Map Run.

\n \n

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

\n
\n

This API action is not supported by EXPRESS state machines.

" } }, "com.amazonaws.sfn#DescribeStateMachineForExecutionInput": { @@ -2477,6 +2561,12 @@ "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the execution you want state machine information for.

", "smithy.api#required": {} } + }, + "includedData": { + "target": "com.amazonaws.sfn#IncludedData", + "traits": { + "smithy.api#documentation": "

If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition.

" + } } }, "traits": { @@ -2547,6 +2637,12 @@ "traits": { "smithy.api#documentation": "

The revision identifier for the state machine. The first revision ID when you create the state machine is null.

\n

Use the state machine revisionId parameter to compare the revision of a state machine with the configuration of the state machine used for executions without performing a diff of the properties, such as definition and roleArn.

" } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.sfn#EncryptionConfiguration", + "traits": { + "smithy.api#documentation": "

Settings to configure server-side encryption.

" + } } }, "traits": { @@ -2562,6 +2658,12 @@ "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the state machine for which you want the information.

\n

If you specify a state machine version ARN, this API returns details about that version. The version ARN is a combination of state machine ARN and the version number separated by a colon (:). For example, stateMachineARN:1.

", "smithy.api#required": {} } + }, + "includedData": { + "target": "com.amazonaws.sfn#IncludedData", + "traits": { + "smithy.api#documentation": "

If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition.

\n \n

\n When calling a labelled ARN for an encrypted state machine, the includedData = METADATA_ONLY parameter will not apply because Step Functions needs to decrypt the entire state machine definition to get the Distributed Map state’s definition. In this case, the API caller needs to have kms:Decrypt permission.\n

\n
" + } } }, "traits": { @@ -2594,7 +2696,7 @@ "definition": { "target": "com.amazonaws.sfn#Definition", "traits": { - "smithy.api#documentation": "

The Amazon States Language definition of the state machine. See Amazon States Language.

", + "smithy.api#documentation": "

The Amazon States Language definition of the state machine. See Amazon States Language.

\n

If called with includedData = METADATA_ONLY, the returned definition will be {}.

", "smithy.api#required": {} } }, @@ -2645,6 +2747,12 @@ "traits": { "smithy.api#documentation": "

The description of the state machine version.

" } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.sfn#EncryptionConfiguration", + "traits": { + "smithy.api#documentation": "

Settings to configure server-side encryption.

" + } } }, "traits": { @@ -2657,6 +2765,50 @@ "smithy.api#default": false } }, + "com.amazonaws.sfn#EncryptionConfiguration": { + "type": "structure", + "members": { + "kmsKeyId": { + "target": "com.amazonaws.sfn#KmsKeyId", + "traits": { + "smithy.api#documentation": "

An alias, alias ARN, key ID, or key ARN of a symmetric encryption KMS key to encrypt data. To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

" + } + }, + "kmsDataKeyReusePeriodSeconds": { + "target": "com.amazonaws.sfn#KmsDataKeyReusePeriodSeconds", + "traits": { + "smithy.api#documentation": "

Maximum duration that Step Functions will reuse data keys. When the period expires, Step Functions will call GenerateDataKey. Only applies to customer managed keys.

" + } + }, + "type": { + "target": "com.amazonaws.sfn#EncryptionType", + "traits": { + "smithy.api#documentation": "

Encryption type

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings to configure server-side encryption.

\n

\n For additional control over security, you can encrypt your data using a customer-managed key for Step Functions state machines and activities. You can configure a symmetric KMS key and data key reuse period when creating or updating a State Machine, and when creating an Activity. The execution history and state machine definition will be encrypted with the key applied to the State Machine. Activity inputs will be encrypted with the key applied to the Activity.\n

\n \n

Step Functions automatically enables encryption at rest using Amazon Web Services owned keys at no charge. However, KMS charges apply when using a customer managed key. For more information about pricing, see Key Management Service pricing.

\n
\n

For more information on KMS, see What is Key Management Service?\n

" + } + }, + "com.amazonaws.sfn#EncryptionType": { + "type": "enum", + "members": { + "AWS_OWNED_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_OWNED_KEY" + } + }, + "CUSTOMER_MANAGED_KMS_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOMER_MANAGED_KMS_KEY" + } + } + } + }, "com.amazonaws.sfn#ErrorMessage": { "type": "string" }, @@ -3036,6 +3188,15 @@ }, { "target": "com.amazonaws.sfn#InvalidArn" + }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" } ], "traits": { @@ -3100,6 +3261,15 @@ }, { "target": "com.amazonaws.sfn#InvalidToken" + }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" } ], "traits": { @@ -3810,6 +3980,23 @@ "com.amazonaws.sfn#IncludeExecutionDataGetExecutionHistory": { "type": "boolean" }, + "com.amazonaws.sfn#IncludedData": { + "type": "enum", + "members": { + "ALL_DATA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALL_DATA" + } + }, + "METADATA_ONLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "METADATA_ONLY" + } + } + } + }, "com.amazonaws.sfn#InspectionData": { "type": "structure", "members": { @@ -3990,6 +4177,18 @@ "smithy.api#error": "client" } }, + "com.amazonaws.sfn#InvalidEncryptionConfiguration": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.sfn#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

Received when encryptionConfiguration is specified but various conditions exist which make the configuration invalid. For example, if type is set to CUSTOMER_MANAGED_KMS_KEY, but kmsKeyId is null, or kmsDataKeyReusePeriodSeconds is not between 60 and 900, or the KMS key is not symmetric or inactive.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.sfn#InvalidExecutionInput": { "type": "structure", "members": { @@ -4010,7 +4209,7 @@ } }, "traits": { - "smithy.api#documentation": "

", + "smithy.api#documentation": "

Configuration is not valid.

", "smithy.api#error": "client" } }, @@ -4062,6 +4261,101 @@ "smithy.api#error": "client" } }, + "com.amazonaws.sfn#KmsAccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.sfn#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

Either your KMS key policy or API caller does not have the required permissions.

", + "smithy.api#error": "client" + } + }, + "com.amazonaws.sfn#KmsDataKeyReusePeriodSeconds": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 60, + "max": 900 + } + } + }, + "com.amazonaws.sfn#KmsInvalidStateException": { + "type": "structure", + "members": { + "kmsKeyState": { + "target": "com.amazonaws.sfn#KmsKeyState", + "traits": { + "smithy.api#documentation": "

Current status of the KMS key. For example: DISABLED, PENDING_DELETION, PENDING_IMPORT, UNAVAILABLE, CREATING.

" + } + }, + "message": { + "target": "com.amazonaws.sfn#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The KMS key is not in a valid state, for example: Disabled or Deleted.

", + "smithy.api#error": "client" + } + }, + "com.amazonaws.sfn#KmsKeyId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, + "com.amazonaws.sfn#KmsKeyState": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "PENDING_DELETION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING_DELETION" + } + }, + "PENDING_IMPORT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING_IMPORT" + } + }, + "UNAVAILABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNAVAILABLE" + } + }, + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATING" + } + } + } + }, + "com.amazonaws.sfn#KmsThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.sfn#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

Received when KMS returns ThrottlingException for a KMS call that Step Functions makes on behalf of the caller.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.sfn#LambdaFunctionFailedEventDetails": { "type": "structure", "members": { @@ -5424,7 +5718,7 @@ "target": "com.amazonaws.sfn#VersionWeight", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The percentage of traffic you want to route to a state machine\n version. The sum of the weights in the routing\n configuration must be equal to 100.

", + "smithy.api#documentation": "

The percentage of traffic you want to route to a state machine version. The sum of the\n weights in the routing configuration must be equal to 100.

", "smithy.api#required": {} } } @@ -5445,6 +5739,15 @@ { "target": "com.amazonaws.sfn#InvalidToken" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#TaskDoesNotExist" }, @@ -5453,7 +5756,7 @@ } ], "traits": { - "smithy.api#documentation": "

Used by activity workers, Task states using the callback\n pattern, and optionally Task states using the job run pattern to report that the task identified by the taskToken failed.

" + "smithy.api#documentation": "

Used by activity workers, Task states using the callback\n pattern, and optionally Task states using the job run pattern to report that the task identified by the taskToken failed.

\n

For an execution with encryption enabled, Step Functions will encrypt the error and cause fields using the KMS key for the execution role.

\n

A caller can mark a task as fail without using any KMS permissions in the execution role if the caller provides a null value for both error and cause fields because no data needs to be encrypted.

" } }, "com.amazonaws.sfn#SendTaskFailureInput": { @@ -5550,6 +5853,15 @@ { "target": "com.amazonaws.sfn#InvalidToken" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#TaskDoesNotExist" }, @@ -5667,6 +5979,15 @@ { "target": "com.amazonaws.sfn#InvalidName" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#StateMachineDeleting" }, @@ -5695,7 +6016,7 @@ "name": { "target": "com.amazonaws.sfn#Name", "traits": { - "smithy.api#documentation": "

Optional name of the execution.\n This name must be unique for your Amazon Web Services account, Region, and state machine for 90 days. For more information,\n see \n Limits Related to State Machine Executions in the Step Functions Developer Guide.

\n

If you don't provide a name for the execution, Step Functions automatically generates a universally unique identifier (UUID) as the execution name.

\n

A name must not contain:

\n
    \n
  • \n

    white space

    \n
  • \n
  • \n

    brackets < > { } [ ]\n

    \n
  • \n
  • \n

    wildcard characters ? *\n

    \n
  • \n
  • \n

    special characters \" # % \\ ^ | ~ ` $ & , ; : /\n

    \n
  • \n
  • \n

    control characters (U+0000-001F, U+007F-009F)

    \n
  • \n
\n

To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.

" + "smithy.api#documentation": "

Optional name of the execution. This name must be unique for your Amazon Web Services account, Region, and state machine for 90 days. For more information,\n see \n Limits Related to State Machine Executions in the Step Functions Developer Guide.

\n

If you don't provide a name for the execution, Step Functions automatically generates a universally unique identifier (UUID) as the execution name.

\n

A name must not contain:

\n
    \n
  • \n

    white space

    \n
  • \n
  • \n

    brackets < > { } [ ]\n

    \n
  • \n
  • \n

    wildcard characters ? *\n

    \n
  • \n
  • \n

    special characters \" # % \\ ^ | ~ ` $ & , ; : /\n

    \n
  • \n
  • \n

    control characters (U+0000-001F, U+007F-009F)

    \n
  • \n
\n

To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.

" } }, "input": { @@ -5755,6 +6076,15 @@ { "target": "com.amazonaws.sfn#InvalidName" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#StateMachineDeleting" }, @@ -5799,6 +6129,12 @@ "traits": { "smithy.api#documentation": "

Passes the X-Ray trace header. The trace header can also be passed in the request\n payload.

" } + }, + "includedData": { + "target": "com.amazonaws.sfn#IncludedData", + "traits": { + "smithy.api#documentation": "

If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition.

" + } } }, "traits": { @@ -6109,7 +6445,7 @@ } }, "traits": { - "smithy.api#documentation": "

", + "smithy.api#documentation": "

State machine type is not supported.

", "smithy.api#error": "client" } }, @@ -6165,12 +6501,21 @@ { "target": "com.amazonaws.sfn#InvalidArn" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#ValidationException" } ], "traits": { - "smithy.api#documentation": "

Stops an execution.

\n

This API action is not supported by EXPRESS state machines.

" + "smithy.api#documentation": "

Stops an execution.

\n

This API action is not supported by EXPRESS state machines.

\n

For an execution with encryption enabled, Step Functions will encrypt the error and cause fields using the KMS key for the execution role.

\n

A caller can stop an execution without using any KMS permissions in the execution role if the caller provides a null value for both error and cause fields because no data needs to be encrypted.

" } }, "com.amazonaws.sfn#StopExecutionInput": { @@ -6665,7 +7010,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 1024 + "max": 2048 } } }, @@ -7035,12 +7380,21 @@ { "target": "com.amazonaws.sfn#InvalidDefinition" }, + { + "target": "com.amazonaws.sfn#InvalidEncryptionConfiguration" + }, { "target": "com.amazonaws.sfn#InvalidLoggingConfiguration" }, { "target": "com.amazonaws.sfn#InvalidTracingConfiguration" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#MissingRequiredParameter" }, @@ -7058,7 +7412,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates an existing state machine by modifying its definition,\n roleArn, or loggingConfiguration. Running executions will continue\n to use the previous definition and roleArn. You must include at\n least one of definition or roleArn or you will receive a\n MissingRequiredParameter error.

\n

A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName.

\n

A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.

\n

The following are some examples of qualified and unqualified state machine ARNs:

\n
    \n
  • \n

    The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine.

    \n

    \n arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel\n

    \n \n

    If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException.

    \n
    \n
  • \n
  • \n

    The following qualified state machine ARN refers to an alias named PROD.

    \n

    \n arn::states:::stateMachine:\n

    \n \n

    If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias.

    \n
    \n
  • \n
  • \n

    The following unqualified state machine ARN refers to a state machine named myStateMachine.

    \n

    \n arn::states:::stateMachine:\n

    \n
  • \n
\n

After you update your state machine, you can set the publish parameter to\n true in the same action to publish a new version. This\n way, you can opt-in to strict versioning of your state machine.

\n \n

Step Functions assigns monotonically increasing integers for state machine versions, starting at version number 1.

\n
\n \n

All StartExecution calls within a few seconds use the updated\n definition and roleArn. Executions started immediately after you\n call UpdateStateMachine may use the previous state machine\n definition and roleArn.

\n
", + "smithy.api#documentation": "

Updates an existing state machine by modifying its definition,\n roleArn, loggingConfiguration, or EncryptionConfiguration. Running executions will continue\n to use the previous definition and roleArn. You must include at\n least one of definition or roleArn or you will receive a\n MissingRequiredParameter error.

\n

A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName.

\n

A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.

\n

The following are some examples of qualified and unqualified state machine ARNs:

\n
    \n
  • \n

    The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine.

    \n

    \n arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel\n

    \n \n

    If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException.

    \n
    \n
  • \n
  • \n

    The following qualified state machine ARN refers to an alias named PROD.

    \n

    \n arn::states:::stateMachine:\n

    \n \n

    If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias.

    \n
    \n
  • \n
  • \n

    The following unqualified state machine ARN refers to a state machine named myStateMachine.

    \n

    \n arn::states:::stateMachine:\n

    \n
  • \n
\n

After you update your state machine, you can set the publish parameter to\n true in the same action to publish a new version. This\n way, you can opt-in to strict versioning of your state machine.

\n \n

Step Functions assigns monotonically increasing integers for state machine versions, starting at version number 1.

\n
\n \n

All StartExecution calls within a few seconds use the updated\n definition and roleArn. Executions started immediately after you\n call UpdateStateMachine may use the previous state machine\n definition and roleArn.

\n
", "smithy.api#idempotent": {} } }, @@ -7179,6 +7533,12 @@ "traits": { "smithy.api#documentation": "

An optional description of the state machine version to publish.

\n

You can only specify the versionDescription parameter if you've set publish to true.

" } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.sfn#EncryptionConfiguration", + "traits": { + "smithy.api#documentation": "

Settings to configure server-side encryption.

" + } } }, "traits": { diff --git a/models/ssm-quicksetup.json b/models/ssm-quicksetup.json new file mode 100644 index 0000000000..be928d7205 --- /dev/null +++ b/models/ssm-quicksetup.json @@ -0,0 +1,2223 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.ssmquicksetup#AccessDeniedException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

The requester has insufficient permissions to perform the operation.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinition": { + "type": "structure", + "members": { + "Type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the Quick Setup configuration.

", + "smithy.api#pattern": "^[a-zA-Z0-9_\\-.:/]{3,200}$", + "smithy.api#required": {} + } + }, + "Parameters": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationParametersMap", + "traits": { + "smithy.api#documentation": "

A list of key-value pairs containing the required parameters for the configuration\n type.

", + "smithy.api#required": {} + } + }, + "TypeVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The version of the Quick Setup type used.

", + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "LocalDeploymentExecutionRoleName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the IAM role used to deploy local\n configurations.

", + "smithy.api#pattern": "^[\\w+=,.@-]{1,64}$" + } + }, + "LocalDeploymentAdministrationRoleArn": { + "target": "com.amazonaws.ssmquicksetup#IAMRoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the IAM role used to administrate local configuration\n deployments.

" + } + }, + "Id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the configuration definition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition of a Quick Setup configuration.

" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinitionInput": { + "type": "structure", + "members": { + "Type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the Quick Setup configuration.

", + "smithy.api#pattern": "^[a-zA-Z0-9_\\-.:/]{3,200}$", + "smithy.api#required": {} + } + }, + "Parameters": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationParametersMap", + "traits": { + "smithy.api#documentation": "

The parameters for the configuration definition type. Parameters for configuration\n definitions vary based the configuration type. The following tables outline the\n parameters for each configuration type.

\n
\n
OpsCenter (Type: Amazon Web ServicesQuickSetupType-SSMOpsCenter)
\n
\n
    \n
  • \n

    \n DelegatedAccountId\n

    \n
      \n
    • \n

      Description: (Required) The ID of the\n delegated administrator account.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetOrganizationalUnits\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetRegions\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
\n
\n
Resource Scheduler (Type: Amazon Web ServicesQuickSetupType-Scheduler)
\n
\n
    \n
  • \n

    \n TargetTagKey\n

    \n
      \n
    • \n

      Description: (Required) The tag key assigned\n to the instances you want to target.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetTagValue\n

    \n
      \n
    • \n

      Description: (Required) The value of the tag\n key assigned to the instances you want to\n target.

      \n
    • \n
    \n
  • \n
  • \n

    \n ICalendarString\n

    \n
      \n
    • \n

      Description: (Required) An iCalendar\n formatted string containing the schedule you want\n Change Manager to use.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetAccounts\n

    \n
      \n
    • \n

      Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts or\n TargetOrganizationalUnits.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetOrganizationalUnits\n

    \n
      \n
    • \n

      Description: (Optional) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetRegions\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
\n
\n
Default Host Management Configuration (Type: Amazon Web ServicesQuickSetupType-DHMC)
\n
\n
    \n
  • \n

    \n UpdateSSMAgent\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether the SSM Agent is updated on the\n target instances every 2 weeks. The default value\n is \"true\".

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetOrganizationalUnits\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetRegions\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
\n
\n
Resource Explorer (Type: Amazon Web ServicesQuickSetupType-ResourceExplorer)
\n
\n
    \n
  • \n

    \n SelectedAggregatorRegion\n

    \n
      \n
    • \n

      Description: (Required) The Amazon Web Services Region where you want to create the\n aggregator index.

      \n
    • \n
    \n
  • \n
  • \n

    \n ReplaceExistingAggregator\n

    \n
      \n
    • \n

      Description: (Required) A boolean value that\n determines whether to demote an existing\n aggregator if it is in a Region that differs from\n the value you specify for the\n SelectedAggregatorRegion.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetOrganizationalUnits\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetRegions\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
\n
\n
Change Manager (Type: Amazon Web ServicesQuickSetupType-SSMChangeMgr)
\n
\n
    \n
  • \n

    \n DelegatedAccountId\n

    \n
      \n
    • \n

      Description: (Required) The ID of the\n delegated administrator account.

      \n
    • \n
    \n
  • \n
  • \n

    \n JobFunction\n

    \n
      \n
    • \n

      Description: (Required) The name for the\n Change Manager job function.

      \n
    • \n
    \n
  • \n
  • \n

    \n PermissionType\n

    \n
      \n
    • \n

      Description: (Optional) Specifies whether\n you want to use default administrator permissions\n for the job function role, or provide a custom\n IAM policy. The valid values are\n CustomPermissions and\n AdminPermissions. The default value\n for the parameter is\n CustomPermissions.

      \n
    • \n
    \n
  • \n
  • \n

    \n CustomPermissions\n

    \n
      \n
    • \n

      Description: (Optional) A JSON string\n containing the IAM policy you want\n your job function to use. You must provide a value\n for this parameter if you specify\n CustomPermissions for the\n PermissionType parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetOrganizationalUnits\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetRegions\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
\n
\n
DevOps Guru (Type: Amazon Web ServicesQuickSetupType-DevOpsGuru)
\n
\n
    \n
  • \n

    \n AnalyseAllResources\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether DevOps Guru analyzes all\n CloudFormation stacks in the account. The\n default value is \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n EnableSnsNotifications\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether DevOps Guru sends\n notifications when an insight is created. The\n default value is \"true\".

      \n
    • \n
    \n
  • \n
  • \n

    \n EnableSsmOpsItems\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether DevOps Guru creates an\n OpsCenter OpsItem when an insight is created. The\n default value is \"true\".

      \n
    • \n
    \n
  • \n
  • \n

    \n EnableDriftRemediation\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether a drift remediation schedule is\n used. The default value is\n \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n RemediationSchedule\n

    \n
      \n
    • \n

      Description: (Optional) A rate expression\n that defines the schedule for drift remediation.\n The valid values are rate(30 days),\n rate(14 days), rate(1\n days), and none. The default\n value is \"none\".

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetAccounts\n

    \n
      \n
    • \n

      Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts or\n TargetOrganizationalUnits.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetOrganizationalUnits\n

    \n
      \n
    • \n

      Description: (Optional) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetRegions\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
\n
\n
Conformance Packs (Type: Amazon Web ServicesQuickSetupType-CFGCPacks)
\n
\n
    \n
  • \n

    \n DelegatedAccountId\n

    \n
      \n
    • \n

      Description: (Optional) The ID of the\n delegated administrator account. This parameter is\n required for Organization deployments.

      \n
    • \n
    \n
  • \n
  • \n

    \n RemediationSchedule\n

    \n
      \n
    • \n

      Description: (Optional) A rate expression\n that defines the schedule for drift remediation.\n The valid values are rate(30 days),\n rate(14 days), rate(2\n days), and none. The default\n value is \"none\".

      \n
    • \n
    \n
  • \n
  • \n

    \n CPackNames\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Config conformance\n packs.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetAccounts\n

    \n
      \n
    • \n

      Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts or\n TargetOrganizationalUnits.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetOrganizationalUnits\n

    \n
      \n
    • \n

      Description: (Optional) The ID of the root\n of your Organization. This configuration type\n doesn't currently support choosing specific OUs.\n The configuration will be deployed to all the OUs\n in the Organization.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetRegions\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
\n
\n
Config Recording (Type: Amazon Web ServicesQuickSetupType-CFGRecording)
\n
\n
    \n
  • \n

    \n RecordAllResources\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether all supported resources are\n recorded. The default value is\n \"true\".

      \n
    • \n
    \n
  • \n
  • \n

    \n ResourceTypesToRecord\n

    \n
      \n
    • \n

      Description: (Optional) A comma separated\n list of resource types you want to record.

      \n
    • \n
    \n
  • \n
  • \n

    \n RecordGlobalResourceTypes\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether global resources are recorded\n with all resource configurations. The default\n value is \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n GlobalResourceTypesRegion\n

    \n
      \n
    • \n

      Description: (Optional) Determines the\n Amazon Web Services Region where global resources\n are recorded.

      \n
    • \n
    \n
  • \n
  • \n

    \n UseCustomBucket\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether a custom Amazon S3\n bucket is used for delivery. The default value is\n \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n DeliveryBucketName\n

    \n
      \n
    • \n

      Description: (Optional) The name of the\n Amazon S3 bucket you want Config to deliver configuration snapshots and\n configuration history files to.

      \n
    • \n
    \n
  • \n
  • \n

    \n DeliveryBucketPrefix\n

    \n
      \n
    • \n

      Description: (Optional) The key prefix you\n want to use in the custom Amazon S3\n bucket.

      \n
    • \n
    \n
  • \n
  • \n

    \n NotificationOptions\n

    \n
      \n
    • \n

      Description: (Optional) Determines the\n notification configuration for the recorder. The\n valid values are NoStreaming,\n UseExistingTopic, and\n CreateTopic. The default value is\n NoStreaming.

      \n
    • \n
    \n
  • \n
  • \n

    \n CustomDeliveryTopicAccountId\n

    \n
      \n
    • \n

      Description: (Optional) The ID of the\n Amazon Web Services account where the Amazon SNS topic you want to use for notifications\n resides. You must specify a value for this\n parameter if you use the\n UseExistingTopic notification\n option.

      \n
    • \n
    \n
  • \n
  • \n

    \n CustomDeliveryTopicName\n

    \n
      \n
    • \n

      Description: (Optional) The name of the\n Amazon SNS topic you want to use for\n notifications. You must specify a value for this\n parameter if you use the\n UseExistingTopic notification\n option.

      \n
    • \n
    \n
  • \n
  • \n

    \n RemediationSchedule\n

    \n
      \n
    • \n

      Description: (Optional) A rate expression\n that defines the schedule for drift remediation.\n The valid values are rate(30 days),\n rate(7 days), rate(1\n days), and none. The default\n value is \"none\".

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetAccounts\n

    \n
      \n
    • \n

      Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts or\n TargetOrganizationalUnits.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetOrganizationalUnits\n

    \n
      \n
    • \n

      Description: (Optional) The ID of the root\n of your Organization. This configuration type\n doesn't currently support choosing specific OUs.\n The configuration will be deployed to all the OUs\n in the Organization.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetRegions\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
\n
\n
Host Management (Type: Amazon Web ServicesQuickSetupType-SSMHostMgmt)
\n
\n
    \n
  • \n

    \n UpdateSSMAgent\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether the SSM Agent is updated on the\n target instances every 2 weeks. The default value\n is \"true\".

      \n
    • \n
    \n
  • \n
  • \n

    \n UpdateEc2LaunchAgent\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether the EC2 Launch agent is updated\n on the target instances every month. The default\n value is \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n CollectInventory\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether instance inventory metadata is\n collected from the target instances. The default\n value is \"true\".

      \n
    • \n
    \n
  • \n
  • \n

    \n ScanInstances\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether the target instances are\n scanned daily for available patches. The default\n value is \"true\".

      \n
    • \n
    \n
  • \n
  • \n

    \n InstallCloudWatchAgent\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether the Amazon CloudWatch agent\n is installed on the target instances. The default\n value is \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n UpdateCloudWatchAgent\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether the Amazon CloudWatch agent\n is updated on the target instances every month.\n The default value is \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n IsPolicyAttachAllowed\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether Quick Setup attaches policies\n to instances profiles already associated with the\n target instances. The default value is\n \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetType\n

    \n
      \n
    • \n

      Description: (Optional) Determines how\n instances are targeted for local account\n deployments. Don't specify a value for this\n parameter if you're deploying to OUs. The valid\n values are *,\n InstanceIds,\n ResourceGroups, and\n Tags. Use * to target\n all instances in the account.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetInstances\n

    \n
      \n
    • \n

      Description: (Optional) A comma separated\n list of instance IDs. You must provide a value for\n this parameter if you specify\n InstanceIds for the\n TargetType parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetTagKey\n

    \n
      \n
    • \n

      Description: (Optional) The tag key assigned\n to the instances you want to target. You must\n provide a value for this parameter if you specify\n Tags for the TargetType\n parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetTagValue\n

    \n
      \n
    • \n

      Description: (Optional) The value of the tag\n key assigned to the instances you want to target.\n You must provide a value for this parameter if you\n specify Tags for the\n TargetType parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n ResourceGroupName\n

    \n
      \n
    • \n

      Description: (Optional) The name of the\n resource group associated with the instances you\n want to target. You must provide a value for this\n parameter if you specify\n ResourceGroups for the\n TargetType parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetAccounts\n

    \n
      \n
    • \n

      Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts or\n TargetOrganizationalUnits.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetOrganizationalUnits\n

    \n
      \n
    • \n

      Description: (Optional) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetRegions\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
\n
\n
Distributor (Type: Amazon Web ServicesQuickSetupType-Distributor)
\n
\n
    \n
  • \n

    \n PackagesToInstall\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of packages you want to install on the target\n instances. The valid values are\n AWSEFSTools, AWSCWAgent,\n and AWSEC2LaunchAgent.

      \n
    • \n
    \n
  • \n
  • \n

    \n RemediationSchedule\n

    \n
      \n
    • \n

      Description: (Optional) A rate expression\n that defines the schedule for drift remediation.\n The valid values are rate(30 days),\n rate(14 days), rate(2\n days), and none. The default\n value is \"rate(30 days)\".

      \n
    • \n
    \n
  • \n
  • \n

    \n IsPolicyAttachAllowed\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether Quick Setup attaches policies\n to instances profiles already associated with the\n target instances. The default value is\n \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetType\n

    \n
      \n
    • \n

      Description: (Optional) Determines how\n instances are targeted for local account\n deployments. Don't specify a value for this\n parameter if you're deploying to OUs. The valid\n values are *,\n InstanceIds,\n ResourceGroups, and\n Tags. Use * to target\n all instances in the account.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetInstances\n

    \n
      \n
    • \n

      Description: (Optional) A comma separated\n list of instance IDs. You must provide a value for\n this parameter if you specify\n InstanceIds for the\n TargetType parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetTagKey\n

    \n
      \n
    • \n

      Description: (Required) The tag key assigned\n to the instances you want to target. You must\n provide a value for this parameter if you specify\n Tags for the TargetType\n parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetTagValue\n

    \n
      \n
    • \n

      Description: (Required) The value of the tag\n key assigned to the instances you want to target.\n You must provide a value for this parameter if you\n specify Tags for the\n TargetType parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n ResourceGroupName\n

    \n
      \n
    • \n

      Description: (Required) The name of the\n resource group associated with the instances you\n want to target. You must provide a value for this\n parameter if you specify\n ResourceGroups for the\n TargetType parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetAccounts\n

    \n
      \n
    • \n

      Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts or\n TargetOrganizationalUnits.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetOrganizationalUnits\n

    \n
      \n
    • \n

      Description: (Optional) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetRegions\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
\n
\n
Patch Policy (Type: Amazon Web ServicesQuickSetupType-PatchPolicy)
\n
\n
    \n
  • \n

    \n PatchPolicyName\n

    \n
      \n
    • \n

      Description: (Required) A name for the patch\n policy. The value you provide is applied to target\n Amazon EC2 instances as a tag.

      \n
    • \n
    \n
  • \n
  • \n

    \n SelectedPatchBaselines\n

    \n
      \n
    • \n

      Description: (Required) An array of JSON\n objects containing the information for the patch\n baselines to include in your patch policy.

      \n
    • \n
    \n
  • \n
  • \n

    \n PatchBaselineUseDefault\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether the selected patch baselines\n are all Amazon Web Services provided.

      \n
    • \n
    \n
  • \n
  • \n

    \n ConfigurationOptionsPatchOperation\n

    \n
      \n
    • \n

      Description: (Optional) Determines whether\n target instances scan for available patches, or\n scan and install available patches. The valid\n values are Scan and\n ScanAndInstall. The default value for\n the parameter is Scan.

      \n
    • \n
    \n
  • \n
  • \n

    \n ConfigurationOptionsScanValue\n

    \n
      \n
    • \n

      Description: (Optional) A cron expression\n that is used as the schedule for when instances\n scan for available patches.

      \n
    • \n
    \n
  • \n
  • \n

    \n ConfigurationOptionsInstallValue\n

    \n
      \n
    • \n

      Description: (Optional) A cron expression\n that is used as the schedule for when instances\n install available patches.

      \n
    • \n
    \n
  • \n
  • \n

    \n ConfigurationOptionsScanNextInterval\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether instances should scan for\n available patches at the next cron interval. The\n default value is \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n ConfigurationOptionsInstallNextInterval\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether instances should install\n available patches at the next cron interval. The\n default value is \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n RebootOption\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether instances are rebooted after\n patches are installed. The default value is\n \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n IsPolicyAttachAllowed\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether Quick Setup attaches policies\n to instances profiles already associated with the\n target instances. The default value is\n \"false\".

      \n
    • \n
    \n
  • \n
  • \n

    \n OutputLogEnableS3\n

    \n
      \n
    • \n

      Description: (Optional) A boolean value that\n determines whether command output logs are sent to\n Amazon S3.

      \n
    • \n
    \n
  • \n
  • \n

    \n OutputS3Location\n

    \n
      \n
    • \n

      Description: (Optional) A JSON string\n containing information about the Amazon S3\n bucket where you want to store the output details\n of the request.

      \n
        \n
      • \n

        \n OutputS3BucketRegion\n

        \n
          \n
        • \n

          Description: (Optional) The Amazon Web Services Region where the Amazon S3\n bucket you want Config to deliver\n command output to is located.

          \n
        • \n
        \n
      • \n
      • \n

        \n OutputS3BucketName\n

        \n
          \n
        • \n

          Description: (Optional) The name of the\n Amazon S3 bucket you want Config to deliver command output to.

          \n
        • \n
        \n
      • \n
      • \n

        \n OutputS3KeyPrefix\n

        \n
          \n
        • \n

          Description: (Optional) The key prefix you\n want to use in the custom Amazon S3\n bucket.

          \n
        • \n
        \n
      • \n
      \n
    • \n
    \n
  • \n
  • \n

    \n TargetType\n

    \n
      \n
    • \n

      Description: (Optional) Determines how\n instances are targeted for local account\n deployments. Don't specify a value for this\n parameter if you're deploying to OUs. The valid\n values are *,\n InstanceIds,\n ResourceGroups, and\n Tags. Use * to target\n all instances in the account.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetInstances\n

    \n
      \n
    • \n

      Description: (Optional) A comma separated\n list of instance IDs. You must provide a value for\n this parameter if you specify\n InstanceIds for the\n TargetType parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetTagKey\n

    \n
      \n
    • \n

      Description: (Required) The tag key assigned\n to the instances you want to target. You must\n provide a value for this parameter if you specify\n Tags for the TargetType\n parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetTagValue\n

    \n
      \n
    • \n

      Description: (Required) The value of the tag\n key assigned to the instances you want to target.\n You must provide a value for this parameter if you\n specify Tags for the\n TargetType parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n ResourceGroupName\n

    \n
      \n
    • \n

      Description: (Required) The name of the\n resource group associated with the instances you\n want to target. You must provide a value for this\n parameter if you specify\n ResourceGroups for the\n TargetType parameter.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetAccounts\n

    \n
      \n
    • \n

      Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts or\n TargetOrganizationalUnits.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetOrganizationalUnits\n

    \n
      \n
    • \n

      Description: (Optional) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
  • \n

    \n TargetRegions\n

    \n
      \n
    • \n

      Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.

      \n
    • \n
    \n
  • \n
\n
\n
", + "smithy.api#required": {} + } + }, + "TypeVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The version of the Quick Setup type to use.

", + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "LocalDeploymentExecutionRoleName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the IAM role used to deploy local\n configurations.

", + "smithy.api#pattern": "^[\\w+=,.@-]{1,64}$" + } + }, + "LocalDeploymentAdministrationRoleArn": { + "target": "com.amazonaws.ssmquicksetup#IAMRoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the IAM role used to administrate local configuration\n deployments.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines the preferences and options for a configuration definition.

" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinitionSummariesList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinitionSummary" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinitionSummary": { + "type": "structure", + "members": { + "Id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the configuration definition.

" + } + }, + "Type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the Quick Setup configuration used by the configuration\n definition.

" + } + }, + "TypeVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The version of the Quick Setup type used by the configuration definition.

" + } + }, + "FirstClassParameters": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationParametersMap", + "traits": { + "smithy.api#documentation": "

The common parameters and values for the configuration definition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A summarized definition of a Quick Setup configuration definition.

" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinitionsInputList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinitionInput" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinitionsList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinition" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationManagerList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationManagerSummary" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationManagerSummary": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the Quick Setup configuration.

", + "smithy.api#required": {} + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the configuration.

" + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the configuration

" + } + }, + "StatusSummaries": { + "target": "com.amazonaws.ssmquicksetup#StatusSummariesList", + "traits": { + "smithy.api#documentation": "

Summaries of the state of the configuration manager. These summaries include an\n aggregate of the statuses from the configuration definition associated with the\n configuration manager. This includes deployment statuses, association statuses,\n drift statuses, health checks, and more.

" + } + }, + "ConfigurationDefinitionSummaries": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinitionSummariesList", + "traits": { + "smithy.api#documentation": "

A summary of the Quick Setup configuration definition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A summary of a Quick Setup configuration manager.

" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationParametersMap": { + "type": "map", + "key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9+=@_\\/\\s-]+$" + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "max": 40960 + } + } + } + }, + "com.amazonaws.ssmquicksetup#ConflictException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

Another request is being processed. Wait a few minutes and try again.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.ssmquicksetup#CreateConfigurationManager": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#CreateConfigurationManagerInput" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#CreateConfigurationManagerOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a Quick Setup configuration manager resource. This object is a collection\n of desired state configurations for multiple configuration definitions and\n summaries describing the deployments of those definitions.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/configurationManager" + } + } + }, + "com.amazonaws.ssmquicksetup#CreateConfigurationManagerInput": { + "type": "structure", + "members": { + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A name for the configuration manager.

", + "smithy.api#pattern": "^[ A-Za-z0-9._-]{0,120}$" + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A description of the configuration manager.

", + "smithy.api#pattern": "^.{0,512}$" + } + }, + "ConfigurationDefinitions": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinitionsInputList", + "traits": { + "smithy.api#documentation": "

The definition of the Quick Setup configuration that the configuration manager\n deploys.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.ssmquicksetup#TagsMap", + "traits": { + "smithy.api#documentation": "

Key-value pairs of metadata to assign to the configuration manager.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#CreateConfigurationManagerOutput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN for the newly created configuration manager.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#DeleteConfigurationManager": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#DeleteConfigurationManagerInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a configuration manager.

", + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/configurationManager/{ManagerArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#DeleteConfigurationManagerInput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the configuration manager.

", + "smithy.api#httpLabel": {}, + "smithy.api#pattern": "^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#Filter": { + "type": "structure", + "members": { + "Key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The key for the filter.

", + "smithy.api#length": { + "max": 128 + }, + "smithy.api#pattern": "^[A-Za-z0-9+=@_\\/\\s-]*$", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.ssmquicksetup#FilterValues", + "traits": { + "smithy.api#documentation": "

The values for the filter keys.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A key-value pair to filter results.

" + } + }, + "com.amazonaws.ssmquicksetup#FilterValues": { + "type": "list", + "member": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9+=@_\\/\\s-]*$" + } + } + }, + "com.amazonaws.ssmquicksetup#FiltersList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#Filter" + } + }, + "com.amazonaws.ssmquicksetup#GetConfigurationManager": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#GetConfigurationManagerInput" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#GetConfigurationManagerOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a configuration manager.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/configurationManager/{ManagerArn}" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "GetConfigurationManagerFailure", + "params": { + "ManagerArn": "arn:aws:ssm-quicksetup:us-east-1:602768233532:configuration-manager/7cac1a1b-64a9-4c9a-97e8-8c68928b8f13" + }, + "expect": { + "failure": { + "errorId": "com.amazonaws.ssmquicksetup#AccessDeniedException" + } + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.ssmquicksetup#GetConfigurationManagerInput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the configuration manager.

", + "smithy.api#httpLabel": {}, + "smithy.api#length": { + "min": 1 + }, + "smithy.api#pattern": "^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#GetConfigurationManagerOutput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the configuration manager.

", + "smithy.api#required": {} + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the configuration manager.

" + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the configuration manager.

" + } + }, + "CreatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The datetime stamp when the configuration manager was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "LastModifiedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The datetime stamp when the configuration manager was last updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "StatusSummaries": { + "target": "com.amazonaws.ssmquicksetup#StatusSummariesList", + "traits": { + "smithy.api#documentation": "

A summary of the state of the configuration manager. This includes deployment\n statuses, association statuses, drift statuses, health checks, and more.

" + } + }, + "ConfigurationDefinitions": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinitionsList", + "traits": { + "smithy.api#documentation": "

The configuration definitions association with the configuration manager.

" + } + }, + "Tags": { + "target": "com.amazonaws.ssmquicksetup#TagsMap", + "traits": { + "smithy.api#documentation": "

Key-value pairs of metadata to assign to the configuration manager.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#GetServiceSettings": { + "type": "operation", + "input": { + "target": "smithy.api#Unit" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#GetServiceSettingsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns settings configured for Quick Setup in the requesting Amazon Web Services account and Amazon Web Services Region.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/serviceSettings" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.ssmquicksetup#GetServiceSettingsOutput": { + "type": "structure", + "members": { + "ServiceSettings": { + "target": "com.amazonaws.ssmquicksetup#ServiceSettings", + "traits": { + "smithy.api#documentation": "

Returns details about the settings for Quick Setup in the requesting Amazon Web Services account and Amazon Web Services Region.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#IAMRoleArn": { + "type": "string", + "traits": { + "aws.api#arnReference": { + "type": "AWS::IAM::Role" + } + } + }, + "com.amazonaws.ssmquicksetup#InternalServerException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

An error occurred on the server side.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.ssmquicksetup#ListConfigurationManagers": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#ListConfigurationManagersInput" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#ListConfigurationManagersOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns Quick Setup configuration managers.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/listConfigurationManagers" + }, + "smithy.api#paginated": { + "items": "ConfigurationManagersList", + "inputToken": "StartingToken", + "outputToken": "NextToken", + "pageSize": "MaxItems" + }, + "smithy.test#smokeTests": [ + { + "id": "ListConfigurationManagersSuccess", + "params": {}, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.ssmquicksetup#ListConfigurationManagersInput": { + "type": "structure", + "members": { + "StartingToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The token to use when requesting a specific set of items from a list.

", + "smithy.api#length": { + "max": 1024 + }, + "smithy.api#pattern": "^[A-Za-z0-9+=@_\\/\\s-]*$" + } + }, + "MaxItems": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Specifies the maximum number of configuration managers that are returned by the\n request.

", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "Filters": { + "target": "com.amazonaws.ssmquicksetup#FiltersList", + "traits": { + "smithy.api#documentation": "

Filters the results returned by the request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#ListConfigurationManagersOutput": { + "type": "structure", + "members": { + "ConfigurationManagersList": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationManagerList", + "traits": { + "smithy.api#documentation": "

The configuration managers returned by the request.

" + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The token to use when requesting the next set of configuration managers. If there\n are no additional operations to return, the string is empty.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#ListQuickSetupTypes": { + "type": "operation", + "input": { + "target": "smithy.api#Unit" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#ListQuickSetupTypesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the available Quick Setup types.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/listQuickSetupTypes" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "ListQuickSetupTypesSuccess", + "params": {}, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.ssmquicksetup#ListQuickSetupTypesOutput": { + "type": "structure", + "members": { + "QuickSetupTypeList": { + "target": "com.amazonaws.ssmquicksetup#QuickSetupTypeList", + "traits": { + "smithy.api#documentation": "

An array of Quick Setup types.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns tags assigned to the resource.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/tags/{ResourceArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.ssmquicksetup#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the resource the tag is assigned to.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.ssmquicksetup#Tags", + "traits": { + "smithy.api#documentation": "

Key-value pairs of metadata assigned to the resource.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#QuickSetup": { + "type": "service", + "version": "2018-05-10", + "operations": [ + { + "target": "com.amazonaws.ssmquicksetup#CreateConfigurationManager" + }, + { + "target": "com.amazonaws.ssmquicksetup#DeleteConfigurationManager" + }, + { + "target": "com.amazonaws.ssmquicksetup#GetConfigurationManager" + }, + { + "target": "com.amazonaws.ssmquicksetup#GetServiceSettings" + }, + { + "target": "com.amazonaws.ssmquicksetup#ListConfigurationManagers" + }, + { + "target": "com.amazonaws.ssmquicksetup#ListQuickSetupTypes" + }, + { + "target": "com.amazonaws.ssmquicksetup#ListTagsForResource" + }, + { + "target": "com.amazonaws.ssmquicksetup#TagResource" + }, + { + "target": "com.amazonaws.ssmquicksetup#UntagResource" + }, + { + "target": "com.amazonaws.ssmquicksetup#UpdateConfigurationDefinition" + }, + { + "target": "com.amazonaws.ssmquicksetup#UpdateConfigurationManager" + }, + { + "target": "com.amazonaws.ssmquicksetup#UpdateServiceSettings" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "SSM QuickSetup", + "arnNamespace": "ssm-quicksetup", + "cloudTrailEventSource": "ssm-quicksetup.amazonaws.com" + }, + "aws.auth#sigv4": { + "name": "ssm-quicksetup" + }, + "aws.iam#supportedPrincipalTypes": [ + "Root", + "IAMUser", + "IAMRole", + "FederatedUser" + ], + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": [ + "Authorization", + "Content-Type", + "Credentials", + "X-Amz-Date", + "X-Api-Key", + "X-Amz-Security-Token", + "x-amz-content-sha256", + "X-Amz-User-Agent" + ] + }, + "smithy.api#documentation": "

Quick Setup helps you quickly configure frequently used services and features with\n recommended best practices. Quick Setup simplifies setting up services, including\n Systems Manager, by automating common or recommended tasks.

", + "smithy.api#title": "AWS Systems Manager QuickSetup", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + 
"conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } 
+ ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with 
FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For 
region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + 
}, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": 
"us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.ssmquicksetup#QuickSetupTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#QuickSetupTypeOutput" + } + }, + "com.amazonaws.ssmquicksetup#QuickSetupTypeOutput": { + "type": "structure", + "members": { + "Type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the Quick Setup configuration.

" + } + }, + "LatestVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The latest version number of the configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the Quick Setup type.

" + } + }, + "com.amazonaws.ssmquicksetup#ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

The resource couldn't be found. Check the ID or name and try again.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.ssmquicksetup#ServiceSettings": { + "type": "structure", + "members": { + "ExplorerEnablingRoleArn": { + "target": "com.amazonaws.ssmquicksetup#IAMRoleArn", + "traits": { + "smithy.api#documentation": "

The IAM role used to enable Explorer.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings configured for Quick Setup.

" + } + }, + "com.amazonaws.ssmquicksetup#Status": { + "type": "enum", + "members": { + "INITIALIZING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INITIALIZING" + } + }, + "DEPLOYING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEPLOYING" + } + }, + "SUCCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETING" + } + }, + "STOPPING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPING" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "STOPPED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPED" + } + }, + "DELETE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETE_FAILED" + } + }, + "STOP_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOP_FAILED" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, + "com.amazonaws.ssmquicksetup#StatusDetails": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.ssmquicksetup#StatusSummariesList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#StatusSummary" + } + }, + "com.amazonaws.ssmquicksetup#StatusSummary": { + "type": "structure", + "members": { + "StatusType": { + "target": "com.amazonaws.ssmquicksetup#StatusType", + "traits": { + "smithy.api#documentation": "

The type of a status summary.

", + "smithy.api#required": {} + } + }, + "Status": { + "target": "com.amazonaws.ssmquicksetup#Status", + "traits": { + "smithy.api#documentation": "

The current status.

" + } + }, + "StatusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

When applicable, returns an informational message relevant to the current status and status type of the status summary object. We don't recommend implementing parsing logic around this value since the messages returned can vary in format.

" + } + }, + "LastUpdatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The datetime stamp when the status was last updated.

", + "smithy.api#required": {}, + "smithy.api#timestampFormat": "date-time" + } + }, + "StatusDetails": { + "target": "com.amazonaws.ssmquicksetup#StatusDetails", + "traits": { + "smithy.api#documentation": "

Details about the status.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A summarized description of the status.

" + } + }, + "com.amazonaws.ssmquicksetup#StatusType": { + "type": "enum", + "members": { + "DEPLOYMENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deployment" + } + }, + "ASYNC_EXECUTIONS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AsyncExecutions" + } + } + } + }, + "com.amazonaws.ssmquicksetup#TagEntry": { + "type": "structure", + "members": { + "Key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The key for the tag.

", + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[A-Za-z0-9 _=@:.+-/]+$" + } + }, + "Value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value for the tag.

", + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9 _=@:.+-/]+$" + } + } + }, + "traits": { + "smithy.api#documentation": "

Key-value pairs of metadata.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.ssmquicksetup#TagKeys": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.ssmquicksetup#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#TagResourceInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Assigns key-value pairs of metadata to Amazon Web Services resources.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/tags/{ResourceArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#TagResourceInput": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the resource to tag.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.ssmquicksetup#TagsMap", + "traits": { + "smithy.api#documentation": "

Key-value pairs of metadata to assign to the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#Tags": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#TagEntry" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.ssmquicksetup#TagsMap": { + "type": "map", + "key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[A-Za-z0-9 _=@:.+-/]+$" + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9 _=@:.+-/]+$" + } + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.ssmquicksetup#ThrottlingException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request or operation exceeds the maximum allowed request rate per Amazon Web Services account and Amazon Web Services Region.

", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.ssmquicksetup#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#UntagResourceInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes tags from the specified resource.

", + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/tags/{ResourceArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#UntagResourceInput": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the resource to remove tags from.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.ssmquicksetup#TagKeys", + "traits": { + "smithy.api#documentation": "

The keys of the tags to remove from the resource.

", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateConfigurationDefinition": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#UpdateConfigurationDefinitionInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates a Quick Setup configuration definition.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/configurationDefinition/{ManagerArn}/{Id}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateConfigurationDefinitionInput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the configuration manager associated with the definition to\n update.

", + "smithy.api#httpLabel": {}, + "smithy.api#pattern": "^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$", + "smithy.api#required": {} + } + }, + "Id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the configuration definition you want to update.

", + "smithy.api#httpLabel": {}, + "smithy.api#pattern": "^[a-z0-9-]{1,20}$", + "smithy.api#required": {} + } + }, + "TypeVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The version of the Quick Setup type to use.

", + "smithy.api#pattern": "^\\d{1,3}(\\.\\d{1,3})?$|^LATEST$" + } + }, + "Parameters": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationParametersMap", + "traits": { + "smithy.api#documentation": "

The parameters for the configuration definition type.

" + } + }, + "LocalDeploymentExecutionRoleName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the IAM role used to deploy local\n configurations.

", + "smithy.api#pattern": "^[\\w+=,.@-]{1,64}$" + } + }, + "LocalDeploymentAdministrationRoleArn": { + "target": "com.amazonaws.ssmquicksetup#IAMRoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the IAM role used to administrate local configuration\n deployments.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateConfigurationManager": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#UpdateConfigurationManagerInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates a Quick Setup configuration manager.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/configurationManager/{ManagerArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateConfigurationManagerInput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the configuration manager.

", + "smithy.api#httpLabel": {}, + "smithy.api#pattern": "^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$", + "smithy.api#required": {} + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A name for the configuration manager.

", + "smithy.api#pattern": "^[ A-Za-z0-9._-]{0,120}$" + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A description of the configuration manager.

", + "smithy.api#pattern": "^.{0,512}$" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateServiceSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#UpdateServiceSettingsInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates settings configured for Quick Setup.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/serviceSettings" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateServiceSettingsInput": { + "type": "structure", + "members": { + "ExplorerEnablingRoleArn": { + "target": "com.amazonaws.ssmquicksetup#IAMRoleArn", + "traits": { + "smithy.api#documentation": "

The IAM role used to enable Explorer.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#ValidationException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

The request is invalid. Verify the values provided for the request parameters are\n accurate.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + } + } +} \ No newline at end of file diff --git a/models/ssm.json b/models/ssm.json index 20551d6f77..892e047223 100644 --- a/models/ssm.json +++ b/models/ssm.json @@ -6687,7 +6687,7 @@ "RejectedPatchesAction": { "target": "com.amazonaws.ssm#PatchAction", "traits": { - "smithy.api#documentation": "

The action for Patch Manager to take on patches included in the\n RejectedPackages list.

\n
    \n
  • \n

    \n \n ALLOW_AS_DEPENDENCY\n : A package in the\n Rejected patches list is installed only if it is a dependency of another package.\n It is considered compliant with the patch baseline, and its status is reported as\n InstalledOther. This is the default action if no option is specified.

    \n
  • \n
  • \n

    \n BLOCK: Packages in the Rejected\n patches list, and packages that include them as dependencies, aren't installed by\n Patch Manager under any circumstances. If a package was installed before it was added to the\n Rejected patches list, or is installed outside of Patch\n Manager afterward, it's considered noncompliant with the patch baseline and its status is\n reported as InstalledRejected.

    \n
  • \n
" + "smithy.api#documentation": "

The action for Patch Manager to take on patches included in the\n RejectedPackages list.

\n
\n
ALLOW_AS_DEPENDENCY
\n
\n

\n Linux and macOS: A package in the rejected patches list\n is installed only if it is a dependency of another package. It is considered compliant with\n the patch baseline, and its status is reported as INSTALLED_OTHER. This is the\n default action if no option is specified.

\n

\n Windows Server: Windows Server doesn't support the\n concept of package dependencies. If a package in the rejected patches list is already\n installed on the node, its status is reported as INSTALLED_OTHER. Any package not\n already installed on the node is skipped. This is the default action if no option is\n specified.

\n
\n
BLOCK
\n
\n

\n All OSs: Packages in the rejected patches list, and\n packages that include them as dependencies, aren't installed by Patch Manager under any\n circumstances. If a package was installed before it was added to the rejected patches list, or\n is installed outside of Patch Manager afterward, it's considered noncompliant with the patch\n baseline and its status is reported as INSTALLED_REJECTED.

\n
\n
" } }, "Description": { @@ -8769,7 +8769,7 @@ } ], "traits": { - "smithy.api#documentation": "

Provides information about one or more of your managed nodes, including the operating system\n platform, SSM Agent version, association status, and IP address. This operation does not return\n information for nodes that are either Stopped or Terminated.

\n

If you specify one or more node IDs, the operation returns information for those managed\n nodes. If you don't specify node IDs, it returns information for all your managed nodes. If you\n specify a node ID that isn't valid or a node that you don't own, you receive an error.

\n \n

The IamRole field returned for this API operation is the Identity and Access Management (IAM) role assigned to on-premises managed nodes. This operation does not\n return the IAM role for EC2 instances.

\n
", + "smithy.api#documentation": "

Provides information about one or more of your managed nodes, including the operating system\n platform, SSM Agent version, association status, and IP address. This operation does not return\n information for nodes that are either Stopped or Terminated.

\n

If you specify one or more node IDs, the operation returns information for those managed\n nodes. If you don't specify node IDs, it returns information for all your managed nodes. If you\n specify a node ID that isn't valid or a node that you don't own, you receive an error.

\n \n

The IamRole field returned for this API operation is the role assigned to an\n Amazon EC2 instance configured with a Systems Manager Quick Setup host management configuration or\n the role assigned to an on-premises managed node.

\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -9102,7 +9102,7 @@ } ], "traits": { - "smithy.api#documentation": "

An API operation used by the Systems Manager console to display information about Systems Manager managed nodes.

", + "smithy.api#documentation": "

An API operation used by the Systems Manager console to display information about Systems Manager managed\n nodes.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -9138,7 +9138,7 @@ "MaxResults": { "target": "com.amazonaws.ssm#DescribeInstancePropertiesMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of items to return for the call. The call also returns a token that you\n can specify in a subsequent call to get the next set of results.

" + "smithy.api#documentation": "

The maximum number of items to return for the call. The call also returns a token that you\n can specify in a subsequent call to get the next set of results.

" } }, "NextToken": { @@ -9164,7 +9164,7 @@ "NextToken": { "target": "com.amazonaws.ssm#NextToken", "traits": { - "smithy.api#documentation": "

The token for the next set of properties to return. Use this token to get the next set of\n results.

" + "smithy.api#documentation": "

The token for the next set of properties to return. Use this token to get the next set of\n results.

" } } }, @@ -10323,7 +10323,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the properties of available patches organized by product, product family,\n classification, severity, and other properties of available patches. You can use the reported\n properties in the filters you specify in requests for operations such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines.

\n

The following section lists the properties that can be used in filters for each major\n operating system type:

\n
\n
AMAZON_LINUX
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
AMAZON_LINUX_2
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
CENTOS
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
DEBIAN
\n
\n

Valid properties: PRODUCT | PRIORITY\n

\n
\n
MACOS
\n
\n

Valid properties: PRODUCT | CLASSIFICATION\n

\n
\n
ORACLE_LINUX
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
REDHAT_ENTERPRISE_LINUX
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
SUSE
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
UBUNTU
\n
\n

Valid properties: PRODUCT | PRIORITY\n

\n
\n
WINDOWS
\n
\n

Valid properties: PRODUCT | PRODUCT_FAMILY |\n CLASSIFICATION | MSRC_SEVERITY\n

\n
\n
", + "smithy.api#documentation": "

Lists the properties of available patches organized by product, product family,\n classification, severity, and other properties of available patches. You can use the reported\n properties in the filters you specify in requests for operations such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines.

\n

The following section lists the properties that can be used in filters for each major\n operating system type:

\n
\n
AMAZON_LINUX
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
AMAZON_LINUX_2
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
AMAZON_LINUX_2023
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
CENTOS
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
DEBIAN
\n
\n

Valid properties: PRODUCT | PRIORITY\n

\n
\n
MACOS
\n
\n

Valid properties: PRODUCT | CLASSIFICATION\n

\n
\n
ORACLE_LINUX
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
REDHAT_ENTERPRISE_LINUX
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
SUSE
\n
\n

Valid properties: PRODUCT | CLASSIFICATION |\n SEVERITY\n

\n
\n
UBUNTU
\n
\n

Valid properties: PRODUCT | PRIORITY\n

\n
\n
WINDOWS
\n
\n

Valid properties: PRODUCT | PRODUCT_FAMILY |\n CLASSIFICATION | MSRC_SEVERITY\n

\n
\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -12091,7 +12091,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns detailed information about command execution for an invocation or plugin.

\n

\n GetCommandInvocation only gives the execution status of a plugin in a document.\n To get the command execution status on a specific managed node, use ListCommandInvocations. To get the command execution status across managed nodes,\n use ListCommands.

", + "smithy.api#documentation": "

Returns detailed information about command execution for an invocation or plugin. The Run\n Command API follows an eventual consistency model, due to the distributed nature of the system\n supporting the API. This means that the result of an API command you run that affects your\n resources might not be immediately visible to all subsequent commands you run. You should keep\n this in mind when you carry out an API command that immediately follows a previous API\n command.

\n

\n GetCommandInvocation only gives the execution status of a plugin in a document.\n To get the command execution status on a specific managed node, use ListCommandInvocations. To get the command execution status across managed nodes,\n use ListCommands.

", "smithy.waiters#waitable": { "CommandExecuted": { "acceptors": [ @@ -13408,7 +13408,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service \n(Amazon SNS) notifications for maintenance window Run Command tasks.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.

" } }, "TaskType": { @@ -14797,7 +14797,7 @@ "IamRole": { "target": "com.amazonaws.ssm#IamRole", "traits": { - "smithy.api#documentation": "

The Identity and Access Management (IAM) role assigned to the on-premises Systems Manager\n managed node. This call doesn't return the IAM role for Amazon Elastic Compute Cloud\n (Amazon EC2) instances. To retrieve the IAM role for an EC2 instance, use\n the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference.

" + "smithy.api#documentation": "

The role assigned to an Amazon EC2 instance configured with a Systems Manager\n Quick Setup host management configuration or the role assigned to an on-premises managed\n node.

\n

This call doesn't return the IAM role for unmanaged\n Amazon EC2 instances (instances not configured for Systems Manager). To retrieve the\n role for an unmanaged instance, use the Amazon EC2 DescribeInstances operation. For\n information, see DescribeInstances in the\n Amazon EC2 API Reference or describe-instances in the\n Amazon Web Services CLI Command Reference.

" } }, "RegistrationDate": { @@ -15329,7 +15329,7 @@ "Name": { "target": "com.amazonaws.ssm#InstanceName", "traits": { - "smithy.api#documentation": "

The value of the EC2 Name tag associated with the node. If a Name tag hasn't been applied to the node, this value is blank.

" + "smithy.api#documentation": "

The value of the EC2 Name tag associated with the node. If a Name\n tag hasn't been applied to the node, this value is blank.

" } }, "InstanceId": { @@ -15347,13 +15347,13 @@ "InstanceRole": { "target": "com.amazonaws.ssm#InstanceRole", "traits": { - "smithy.api#documentation": "

The instance profile attached to the node. If an instance profile isn't attached to the node, this value is blank.

" + "smithy.api#documentation": "

The instance profile attached to the node. If an instance profile isn't attached to the\n node, this value is blank.

" } }, "KeyName": { "target": "com.amazonaws.ssm#KeyName", "traits": { - "smithy.api#documentation": "

The name of the key pair associated with the node. If a key pair isnt't associated with the node, this value is blank.

" + "smithy.api#documentation": "

The name of the key pair associated with the\n node. If a key pair isn't associated with the\n node, this value is blank.

" } }, "InstanceState": { @@ -15365,13 +15365,13 @@ "Architecture": { "target": "com.amazonaws.ssm#Architecture", "traits": { - "smithy.api#documentation": "

The CPU architecture of the node. For example, x86_64.

" + "smithy.api#documentation": "

The CPU architecture of the node. For example, x86_64.

" } }, "IPAddress": { "target": "com.amazonaws.ssm#IPAddress", "traits": { - "smithy.api#documentation": "

The public IPv4 address assigned to the node. If a public IPv4 address isn't assigned to the node, this value is blank.

" + "smithy.api#documentation": "

The public IPv4 address assigned to the node. If a public IPv4 address isn't assigned to the\n node, this value is blank.

" } }, "LaunchTime": { @@ -15419,13 +15419,13 @@ "ActivationId": { "target": "com.amazonaws.ssm#ActivationId", "traits": { - "smithy.api#documentation": "

The activation ID created by Systems Manager when the server or virtual machine (VM) was registered

" + "smithy.api#documentation": "

The activation ID created by Systems Manager when the server or virtual machine (VM) was\n registered

" } }, "IamRole": { "target": "com.amazonaws.ssm#IamRole", "traits": { - "smithy.api#documentation": "

The IAM role used in the hybrid activation to register the node with Systems Manager.

" + "smithy.api#documentation": "

The IAM role used in the hybrid activation to register the node with\n Systems Manager.

" } }, "RegistrationDate": { @@ -15503,7 +15503,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes a filter for a specific list of managed nodes. You can filter node information by using tags. You specify tags by using a key-value mapping.

" + "smithy.api#documentation": "

Describes a filter for a specific list of managed nodes. You can filter node information by\n using tags. You specify tags by using a key-value mapping.

" } }, "com.amazonaws.ssm#InstancePropertyFilterKey": { @@ -18147,7 +18147,20 @@ "outputToken": "NextToken", "items": "DocumentIdentifiers", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListDocumentsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.ssm#ListDocumentsRequest": { @@ -19503,7 +19516,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service \n(Amazon SNS) notifications for maintenance window Run Command tasks.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.

" } }, "TimeoutSeconds": { @@ -19697,7 +19710,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service \n(Amazon SNS) notifications for maintenance window Run Command tasks.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.

" } }, "MaxConcurrency": { @@ -23249,7 +23262,7 @@ "target": "com.amazonaws.ssm#DefaultBaseline", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Whether this is the default baseline. Amazon Web Services Systems Manager supports creating multiple default patch\n baselines. For example, you can create a default patch baseline for each operating system.

" + "smithy.api#documentation": "

Indicates whether this is the default baseline. Amazon Web Services Systems Manager supports creating multiple default\n patch baselines. For example, you can create a default patch baseline for each operating\n system.

" } } }, @@ -23984,13 +23997,13 @@ "target": "com.amazonaws.ssm#ApproveAfterDays", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The number of days after the release date of each patch matched by the rule that the patch\n is marked as approved in the patch baseline. For example, a value of 7 means that\n patches are approved seven days after they are released. Not supported on Debian Server or Ubuntu\n Server.

" + "smithy.api#documentation": "

The number of days after the release date of each patch matched by the rule that the patch\n is marked as approved in the patch baseline. For example, a value of 7 means that\n patches are approved seven days after they are released.

\n \n

This parameter is marked as not required, but your request must include a value\n for either ApproveAfterDays or ApproveUntilDate.

\n
\n

Not supported for Debian Server or Ubuntu Server.

" } }, "ApproveUntilDate": { "target": "com.amazonaws.ssm#PatchStringDateTime", "traits": { - "smithy.api#documentation": "

The cutoff date for auto approval of released patches. Any patches released on or before\n this date are installed automatically. Not supported on Debian Server or Ubuntu Server.

\n

Enter dates in the format YYYY-MM-DD. For example,\n 2021-12-31.

" + "smithy.api#documentation": "

The cutoff date for auto approval of released patches. Any patches released on or before\n this date are installed automatically.

\n

Enter dates in the format YYYY-MM-DD. For example,\n 2021-12-31.

\n \n

This parameter is marked as not required, but your request must include a value\n for either ApproveUntilDate or ApproveAfterDays.

\n
\n

Not supported for Debian Server or Ubuntu Server.

" } }, "EnableNonSecurity": { @@ -29779,7 +29792,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service \n(Amazon SNS) notifications for maintenance window Run Command tasks.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.

" } }, "TaskParameters": { @@ -30190,7 +30203,7 @@ "RejectedPatchesAction": { "target": "com.amazonaws.ssm#PatchAction", "traits": { - "smithy.api#documentation": "

The action for Patch Manager to take on patches included in the\n RejectedPackages list.

\n
    \n
  • \n

    \n \n ALLOW_AS_DEPENDENCY\n : A package in the\n Rejected patches list is installed only if it is a dependency of another package.\n It is considered compliant with the patch baseline, and its status is reported as\n InstalledOther. This is the default action if no option is specified.

    \n
  • \n
  • \n

    \n BLOCK: Packages in the Rejected\n patches list, and packages that include them as dependencies, aren't installed by\n Patch Manager under any circumstances. If a package was installed before it was added to the\n Rejected patches list, or is installed outside of Patch\n Manager afterward, it's considered noncompliant with the patch baseline and its status is\n reported as InstalledRejected.

    \n
  • \n
" + "smithy.api#documentation": "

The action for Patch Manager to take on patches included in the\n RejectedPackages list.

\n
\n
ALLOW_AS_DEPENDENCY
\n
\n

\n Linux and macOS: A package in the rejected patches list\n is installed only if it is a dependency of another package. It is considered compliant with\n the patch baseline, and its status is reported as INSTALLED_OTHER. This is the\n default action if no option is specified.

\n

\n Windows Server: Windows Server doesn't support the\n concept of package dependencies. If a package is in the rejected patches list and already\n installed on the node, its status is reported as INSTALLED_OTHER. Any package not\n already installed on the node is skipped. This is the default action if no option is\n specified.

\n
\n
BLOCK
\n
\n

\n All OSs: Packages in the rejected patches list, and\n packages that include them as dependencies, aren't installed by Patch Manager under any\n circumstances. If a package was installed before it was added to the rejected patches list, or\n is installed outside of Patch Manager afterward, it's considered noncompliant with the patch\n baseline and its status is reported as INSTALLED_REJECTED.

\n
\n
" } }, "Description": { diff --git a/models/support.json b/models/support.json index b0f5eb9a8e..90099d61b5 100644 --- a/models/support.json +++ b/models/support.json @@ -94,7 +94,7 @@ "name": "support" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Web Services Support\n

The Amazon Web Services Support API Reference is intended for programmers who need detailed\n information about the Amazon Web Services Support operations and data types. You can use the API to manage\n your support cases programmatically. The Amazon Web Services Support API uses HTTP methods that return\n results in JSON format.

\n \n
    \n
  • \n

    You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.

    \n
  • \n
  • \n

    If you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException error message appears. For\n information about changing your support plan, see Amazon Web Services Support.

    \n
  • \n
\n
\n

You can also use the Amazon Web Services Support API to access features for Trusted Advisor. You can return a list of\n checks and their descriptions, get check results, specify checks to refresh, and get the\n refresh status of checks.

\n

You can manage your support cases with the following Amazon Web Services Support API operations:

\n \n

You can also use the Amazon Web Services Support API to call the Trusted Advisor operations. For more\n information, see Trusted Advisor in the\n Amazon Web Services Support User Guide.

\n

For authentication of requests, Amazon Web Services Support uses Signature Version 4 Signing\n Process.

\n

For more information about this service and the endpoints to use, see About the\n Amazon Web Services Support API in the Amazon Web Services Support User Guide.

", + "smithy.api#documentation": "Amazon Web Services Support\n

The Amazon Web Services Support API Reference is intended for programmers who need detailed\n information about the Amazon Web Services Support operations and data types. You can use the API to manage\n your support cases programmatically. The Amazon Web Services Support API uses HTTP methods that return\n results in JSON format.

\n \n
    \n
  • \n

    You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.

    \n
  • \n
  • \n

    If you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException error message appears. For\n information about changing your support plan, see Amazon Web Services Support.

    \n
  • \n
\n
\n

You can also use the Amazon Web Services Support API to access features for Trusted Advisor. You can return a list of\n checks and their descriptions, get check results, specify checks to refresh, and get the\n refresh status of checks.

\n

You can manage your support cases with the following Amazon Web Services Support API operations:

\n \n

You can also use the Amazon Web Services Support API to call the Trusted Advisor operations. For more\n information, see Trusted Advisor in the\n Amazon Web Services Support User Guide.

\n

For authentication of requests, Amazon Web Services Support uses Signature Version 4 Signing\n Process.

\n

For more information about this service and the endpoints to use, see About the\n Amazon Web Services Support API in the Amazon Web Services Support User Guide.

", "smithy.api#title": "AWS Support", "smithy.api#xmlNamespace": { "uri": "http://support.amazonaws.com/doc/2013-04-15/" @@ -1387,7 +1387,7 @@ } }, "traits": { - "smithy.api#documentation": "

An attachment to a case communication. The attachment consists of the file name and\n the content of the file.

" + "smithy.api#documentation": "

An attachment to a case communication. The attachment consists of the file name and\n the content of the file. Each attachment file size should not exceed 5 MB. File types that are supported include the following: pdf, jpeg, .doc, .log, .text

" } }, "com.amazonaws.support#AttachmentDetails": { @@ -1554,7 +1554,7 @@ "status": { "target": "com.amazonaws.support#Status", "traits": { - "smithy.api#documentation": "

The status of the case.

\n

Valid values:

\n
    \n
  • \n

    \n opened\n

    \n
  • \n
  • \n

    \n pending-customer-action\n

    \n
  • \n
  • \n

    \n reopened\n

    \n
  • \n
  • \n

    \n resolved\n

    \n
  • \n
  • \n

    \n unassigned\n

    \n
  • \n
  • \n

    \n work-in-progress\n

    \n
  • \n
" + "smithy.api#documentation": "

The status of the case.

\n

Valid values:

\n
    \n
  • \n

    \n all-open\n

    \n
  • \n
  • \n

    \n customer-action-completed\n

    \n
  • \n
  • \n

    \n opened\n

    \n
  • \n
  • \n

    \n pending-customer-action\n

    \n
  • \n
  • \n

    \n reopened\n

    \n
  • \n
  • \n

    \n resolved\n

    \n
  • \n
  • \n

    \n unassigned\n

    \n
  • \n
  • \n

    \n work-in-progress\n

    \n
  • \n
" } }, "serviceCode": { @@ -1607,7 +1607,7 @@ } }, "traits": { - "smithy.api#documentation": "

A JSON-formatted object that contains the metadata for a support case. It is contained\n in the response from a DescribeCases request. CaseDetails contains the following fields:

\n
    \n
  • \n

    \n caseId - The support case ID requested\n or returned in the call. The case ID is an alphanumeric string formatted as\n shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47.

    \n
  • \n
  • \n

    \n categoryCode - The category of problem\n for the support case. Corresponds to the CategoryCode values\n returned by a call to DescribeServices.

    \n
  • \n
  • \n

    \n displayId - The identifier for the case\n on pages in the Amazon Web Services Support Center.

    \n
  • \n
  • \n

    \n language - The language in which Amazon Web Services Support handles the case. Amazon Web Services Support\ncurrently supports Chinese (“zh”), English (\"en\"), Japanese (\"ja\") and Korean (“ko”). You must specify the ISO 639-1\ncode for the language parameter if you want support in that language.

    \n
  • \n
  • \n

    \n nextToken - A resumption point for\n pagination.

    \n
  • \n
  • \n

    \n recentCommunications - One or more Communication objects. Fields of these objects are\n attachments, body, caseId,\n submittedBy, and timeCreated.

    \n
  • \n
  • \n

    \n serviceCode - The identifier for the\n Amazon Web Services service that corresponds to the service code defined in the call to DescribeServices.

    \n
  • \n
  • \n

    \n severityCode - The severity code\n assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels. The possible values are:\n low, normal, high,\n urgent, and critical.

    \n
  • \n
  • \n

    \n status - The status of the case in the\n Amazon Web Services Support Center. Valid values:

    \n
      \n
    • \n

      \n opened\n

      \n
    • \n
    • \n

      \n pending-customer-action\n

      \n
    • \n
    • \n

      \n reopened\n

      \n
    • \n
    • \n

      \n resolved\n

      \n
    • \n
    • \n

      \n unassigned\n

      \n
    • \n
    • \n

      \n work-in-progress\n

      \n
    • \n
    \n
  • \n
  • \n

    \n subject - The subject line of the\n case.

    \n
  • \n
  • \n

    \n submittedBy - The email address of the\n account that submitted the case.

    \n
  • \n
  • \n

    \n timeCreated - The time the case was\n created, in ISO-8601 format.

    \n
  • \n
" + "smithy.api#documentation": "

A JSON-formatted object that contains the metadata for a support case. It is contained\n in the response from a DescribeCases request. CaseDetails contains the following fields:

\n
    \n
  • \n

    \n caseId - The support case ID requested\n or returned in the call. The case ID is an alphanumeric string formatted as\n shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47.

    \n
  • \n
  • \n

    \n categoryCode - The category of problem\n for the support case. Corresponds to the CategoryCode values\n returned by a call to DescribeServices.

    \n
  • \n
  • \n

    \n displayId - The identifier for the case\n on pages in the Amazon Web Services Support Center.

    \n
  • \n
  • \n

    \n language - The language in which Amazon Web Services Support handles the case. Amazon Web Services Support\ncurrently supports Chinese (“zh”), English (\"en\"), Japanese (\"ja\") and Korean (“ko”). You must specify the ISO 639-1\ncode for the language parameter if you want support in that language.

    \n
  • \n
  • \n

    \n nextToken - A resumption point for\n pagination.

    \n
  • \n
  • \n

    \n recentCommunications - One or more Communication objects. Fields of these objects are\n attachments, body, caseId,\n submittedBy, and timeCreated.

    \n
  • \n
  • \n

    \n serviceCode - The identifier for the\n Amazon Web Services service that corresponds to the service code defined in the call to DescribeServices.

    \n
  • \n
  • \n

    \n severityCode - The severity code\n assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels. The possible values are:\n low, normal, high,\n urgent, and critical.

    \n
  • \n
  • \n

    \n status - The status of the case in the\n Amazon Web Services Support Center. Valid values:

    \n
      \n
    • \n

      \n all-open\n

      \n
    • \n
    • \n

      \n customer-action-completed\n

      \n
    • \n
    • \n

      \n opened\n

      \n
    • \n
    • \n

      \n pending-customer-action\n

      \n
    • \n
    • \n

      \n reopened\n

      \n
    • \n
    • \n

      \n resolved\n

      \n
    • \n
    • \n

      \n unassigned\n

      \n
    • \n
    • \n

      \n work-in-progress\n

      \n
    • \n
    \n
  • \n
  • \n

    \n subject - The subject line of the\n case.

    \n
  • \n
  • \n

    \n submittedBy - The email address of the\n account that submitted the case.

    \n
  • \n
  • \n

    \n timeCreated - The time the case was\n created, in ISO-8601 format.

    \n
  • \n
" } }, "com.amazonaws.support#CaseId": { diff --git a/models/tnb.json b/models/tnb.json index 16c21fe9c6..a0c77f0f06 100644 --- a/models/tnb.json +++ b/models/tnb.json @@ -101,7 +101,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a function package.

\n

A function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network. For more information, see Function packages in the Amazon Web Services Telco Network Builder User Guide. \n

\n

Creating a function package is the first step for creating a network in AWS TNB. This request creates an empty container with an ID. The next step is to upload the actual CSAR zip file into that empty container. To upload function package content, see PutSolFunctionPackageContent.

", + "smithy.api#documentation": "

Creates a function package.

\n

A function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network. For more information, see Function packages in the\n Amazon Web Services Telco Network Builder User Guide.

\n

Creating a function package is the first step for creating a network in AWS TNB. This\n request creates an empty container with an ID. The next step is to upload the actual CSAR\n zip file into that empty container. To upload function package content, see PutSolFunctionPackageContent.

", "smithy.api#examples": [ { "title": "Create a Sol function package", @@ -221,7 +221,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a network instance.

\n

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed. Creating a network instance is the third step after creating a network package. For more information about network instances, Network instances in the Amazon Web Services Telco Network Builder User Guide.

\n

Once you create a network instance, you can instantiate it. To instantiate a network, see InstantiateSolNetworkInstance.

", + "smithy.api#documentation": "

Creates a network instance.

\n

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed. Creating a network instance is the third step after creating a network\n package. For more information about network instances, see Network instances in the\n Amazon Web Services Telco Network Builder User Guide.

\n

Once you create a network instance, you can instantiate it. To instantiate a network,\n see InstantiateSolNetworkInstance.

", "smithy.api#examples": [ { "title": "Create a Sol Network Instance", @@ -361,7 +361,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a network package.

\n

A network package is a .zip file in CSAR (Cloud Service Archive) format defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on. For more information, see Network instances in the Amazon Web Services Telco Network Builder User Guide. \n

\n

A network package consists of a network service descriptor (NSD) file (required) and any additional files (optional), such as scripts specific to your needs. For example, if you have multiple function packages in your network package, you can use the NSD to define which network functions should run in certain VPCs, subnets, or EKS clusters.

\n

This request creates an empty network package container with an ID. Once you create a network package, you can upload the network package content using PutSolNetworkPackageContent.

", + "smithy.api#documentation": "

Creates a network package.

\n

A network package is a .zip file in CSAR (Cloud Service Archive) format that defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on. For more information, see Network instances in the\n Amazon Web Services Telco Network Builder User Guide.

\n

A network package consists of a network service descriptor (NSD) file (required) and any\n additional files (optional), such as scripts specific to your needs. For example, if you\n have multiple function packages in your network package, you can use the NSD to define\n which network functions should run in certain VPCs, subnets, or EKS clusters.

\n

This request creates an empty network package container with an ID. Once you create a\n network package, you can upload the network package content using PutSolNetworkPackageContent.

", "smithy.api#examples": [ { "title": "Create a Sol network package", @@ -478,7 +478,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a function package.

\n

A function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network.

\n

To delete a function package, the package must be in a disabled state. To disable a function package, see UpdateSolFunctionPackage.\n

", + "smithy.api#documentation": "

Deletes a function package.

\n

A function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network.

\n

To delete a function package, the package must be in a disabled state. To disable a\n function package, see UpdateSolFunctionPackage.

", "smithy.api#examples": [ { "title": "Delete a function package", @@ -537,7 +537,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a network instance.

\n

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.

\n

To delete a network instance, the instance must be in a stopped or terminated state. To terminate a network instance, see TerminateSolNetworkInstance.

", + "smithy.api#documentation": "

Deletes a network instance.

\n

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.

\n

To delete a network instance, the instance must be in a stopped or terminated state. To\n terminate a network instance, see TerminateSolNetworkInstance.

", "smithy.api#examples": [ { "title": "Delete a Sol Network Instance.", @@ -596,7 +596,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes network package.

\n

A network package is a .zip file in CSAR (Cloud Service Archive) format defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on.

\n

To delete a network package, the package must be in a disable state. To disable a network package, see UpdateSolNetworkPackage.

", + "smithy.api#documentation": "

Deletes network package.

\n

A network package is a .zip file in CSAR (Cloud Service Archive) format that defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on.

\n

To delete a network package, the package must be in a disabled state. To disable a\n network package, see UpdateSolNetworkPackage.

", "smithy.api#examples": [ { "title": "Delete a Sol network package", @@ -717,7 +717,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets the details of a network function instance, including the instantation state and metadata from the function package descriptor in the network function package.

\n

A network function instance is a function in a function package .

", + "smithy.api#documentation": "

Gets the details of a network function instance, including the instantiation state and\n metadata from the function package descriptor in the network function package.

\n

A network function instance is a function in a function package.

", "smithy.api#examples": [ { "title": "Get a Sol Network Function Instance details", @@ -910,7 +910,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets the details of an individual function package, such as the operational state and whether the package is in use.

\n

A function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network..

", + "smithy.api#documentation": "

Gets the details of an individual function package, such as the operational state and\n whether the package is in use.

\n

A function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network.

", "smithy.api#examples": [ { "title": "Describe a function package with correct vnfPkgId", @@ -1098,7 +1098,7 @@ "accept": { "target": "com.amazonaws.tnb#DescriptorContentType", "traits": { - "smithy.api#documentation": "

Indicates which content types, expressed as MIME types, the client is able to understand.

", + "smithy.api#documentation": "

Indicates which content types, expressed as MIME types, the client is able to\n understand.

", "smithy.api#httpHeader": "Accept", "smithy.api#required": {} } @@ -1217,7 +1217,7 @@ "vnfdId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

Function package descriptor ID.

" + "smithy.api#documentation": "

Function package descriptor ID.

" } }, "vnfProvider": { @@ -1462,7 +1462,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets the details of a network operation, including the tasks involved in the network operation and the status of the tasks.

\n

A network operation is any operation that is done to your network, such as network instance instantiation or termination.

", + "smithy.api#documentation": "

Gets the details of a network operation, including the tasks involved in the network\n operation and the status of the tasks.

\n

A network operation is any operation that is done to your network, such as network instance instantiation or termination.

", "smithy.api#examples": [ { "title": "Get Sol Network Instantiate operation", @@ -1476,6 +1476,85 @@ "operationState": "COMPLETED", "lcmOperationType": "INSTANTIATE", "metadata": { + "instantiateMetadata": { + "nsdInfoId": "np-0d0f3e2eae4fc1ac1", + "additionalParamsForNs": { + "cidr_block": "10.0.0.0/16", + "availability_zone": "us-west-2a" + } + }, + "createdAt": "2022-06-10T19:48:34Z", + "lastModified": "2022-06-10T21:48:33Z" + }, + "tasks": [ + { + "taskName": "HookExecution", + "taskContext": { + "cloudWatchLogsARN": "arn:aws:logs:us-east-1:123456789000:log-group:/aws/codebuild/TestProject:log-stream:a4dc6b0b-2ea3-48c5-bb30-636c4f376b81" + }, + "taskStatus": "IN_PROGRESS", + "taskStartTime": "2022-06-10T19:48:34Z", + "taskEndTime": "2022-06-10T21:48:33Z" + } + ] + } + }, + { + "title": "Get Sol Network Update operation", + "input": { + "nsLcmOpOccId": "no-0d5b823eb5c2a9241" + }, + "output": { + "id": "no-0d5b823eb5c2a9241", + "arn": "arn:aws:tnb:us-west-2:123456789000:network-operation/no-0d5b823eb5c2a9241", + "nsInstanceId": "ni-0d5b823eb5c2a9241", + "operationState": "COMPLETED", + "lcmOperationType": "UPDATE", + "updateType": "UPDATE_NS", + "metadata": { + "updateNsMetadata": { + "nsdInfoId": "np-0d0f3e2eae4fc1ac1", + "additionalParamsForNs": { + "cidr_block": "10.0.0.0/16", + "availability_zone": "us-west-2a" + } + }, + "createdAt": "2022-06-10T19:48:34Z", + "lastModified": "2022-06-10T21:48:33Z" + }, + "tasks": [ + { + "taskName": "HookExecution", + "taskContext": { + "cloudWatchLogsARN": "arn:aws:logs:us-east-1:123456789000:log-group:/aws/codebuild/TestProject:log-stream:a4dc6b0b-2ea3-48c5-bb30-636c4f376b81" + }, + "taskStatus": "IN_PROGRESS", + "taskStartTime": "2022-06-10T19:48:34Z", + "taskEndTime": "2022-06-10T21:48:33Z" + } + ] + } + }, + { + "title": "Get Sol Network Update operation", + "input": { + "nsLcmOpOccId": "no-0d5b823eb5c2a9241" + }, + "output": { + "id": "no-0d5b823eb5c2a9241", + "arn": 
"arn:aws:tnb:us-west-2:123456789000:network-operation/no-0d5b823eb5c2a9241", + "nsInstanceId": "ni-0d5b823eb5c2a9241", + "operationState": "COMPLETED", + "lcmOperationType": "UPDATE", + "updateType": "MODIFY_VNF_INFORMATION", + "metadata": { + "modifyVnfInfoMetadata": { + "vnfInstanceId": "fi-0d5b823eb5c2a9241", + "vnfConfigurableProperties": { + "pcf.port": "8080", + "pcf.pods": "10" + } + }, "createdAt": "2022-06-10T19:48:34Z", "lastModified": "2022-06-10T21:48:33Z" }, @@ -1561,6 +1640,24 @@ "com.amazonaws.tnb#GetSolNetworkOperationMetadata": { "type": "structure", "members": { + "updateNsMetadata": { + "target": "com.amazonaws.tnb#UpdateNsMetadata", + "traits": { + "smithy.api#documentation": "

Metadata related to the network operation occurrence for network instance updates.\n This is populated only if the lcmOperationType is UPDATE and the\n updateType is UPDATE_NS.

" + } + }, + "modifyVnfInfoMetadata": { + "target": "com.amazonaws.tnb#ModifyVnfInfoMetadata", + "traits": { + "smithy.api#documentation": "

Metadata related to the network operation occurrence for network function updates in a network instance.\n This is populated only if the lcmOperationType is UPDATE and the\n updateType is MODIFY_VNF_INFORMATION.

" + } + }, + "instantiateMetadata": { + "target": "com.amazonaws.tnb#InstantiateMetadata", + "traits": { + "smithy.api#documentation": "

Metadata related to the network operation occurrence for network instantiation.\n This is populated only if the lcmOperationType is INSTANTIATE.

" + } + }, "createdAt": { "target": "smithy.api#Timestamp", "traits": { @@ -1616,6 +1713,12 @@ "smithy.api#documentation": "

Type of the operation represented by this occurrence.

" } }, + "updateType": { + "target": "com.amazonaws.tnb#UpdateSolNetworkType", + "traits": { + "smithy.api#documentation": "

Type of the update. Only present if the network operation\n lcmOperationType is UPDATE.

" + } + }, "error": { "target": "com.amazonaws.tnb#ProblemDetails", "traits": { @@ -1967,7 +2070,7 @@ "nsd": { "target": "com.amazonaws.tnb#NetworkArtifactMeta", "traits": { - "smithy.api#documentation": "

Metadata related to the onboarded network service descriptor in the network package.

" + "smithy.api#documentation": "

Metadata related to the onboarded network service descriptor in the network\n package.

" } }, "createdAt": { @@ -2053,7 +2156,7 @@ "vnfPkgIds": { "target": "com.amazonaws.tnb#VnfPkgIdList", "traits": { - "smithy.api#documentation": "

Identifies the function package for the function package descriptor referenced by the onboarded network package.

", + "smithy.api#documentation": "

Identifies the function package for the function package descriptor referenced by the\n onboarded network package.

", "smithy.api#required": {} } }, @@ -2140,6 +2243,27 @@ "smithy.api#documentation": "

The metadata of a network function.

\n

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.

" } }, + "com.amazonaws.tnb#InstantiateMetadata": { + "type": "structure", + "members": { + "nsdInfoId": { + "target": "com.amazonaws.tnb#NsdInfoId", + "traits": { + "smithy.api#documentation": "

The network service descriptor used for instantiating the network instance.

", + "smithy.api#required": {} + } + }, + "additionalParamsForNs": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

The configurable properties used during instantiation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Metadata related to the configuration properties used during instantiation of the network instance.

" + } + }, "com.amazonaws.tnb#InstantiateSolNetworkInstance": { "type": "operation", "input": { @@ -2169,7 +2293,7 @@ } ], "traits": { - "smithy.api#documentation": "

Instantiates a network instance.

\n

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.

\n

Before you can instantiate a network instance, you have to create a network instance. For more information, see CreateSolNetworkInstance.

", + "smithy.api#documentation": "

Instantiates a network instance.

\n

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.

\n

Before you can instantiate a network instance, you have to create a network instance.\n For more information, see CreateSolNetworkInstance.

", "smithy.api#examples": [ { "title": "Instantiate a Sol Network Instance", @@ -2234,7 +2358,7 @@ "tags": { "target": "com.amazonaws.tnb#TagMap", "traits": { - "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" } } }, @@ -2255,7 +2379,7 @@ "tags": { "target": "com.amazonaws.tnb#TagMap", "traits": { - "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" } } }, @@ -3052,7 +3176,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists details for a network operation, including when the operation started and the status of the operation.

\n

A network operation is any operation that is done to your network, such as network instance instantiation or termination.

", + "smithy.api#documentation": "

Lists details for a network operation, including when the operation started and the\n status of the operation.

\n

A network operation is any operation that is done to your network, such as network instance instantiation or termination.

", "smithy.api#examples": [ { "title": "List Sol Network Instantiate operations", @@ -3066,6 +3190,7 @@ "operationState": "COMPLETED", "lcmOperationType": "INSTANTIATE", "metadata": { + "nsdInfoId": "np-0d0f3e2eae4fc1ac1", "createdAt": "2022-06-10T19:48:34Z", "lastModified": "2022-06-10T21:48:33Z" } @@ -3092,12 +3217,59 @@ "detail": "An error occurred (InsufficientInstanceCapacity) when calling the RunInstances operation (reached max retries: 4). We currently do not have sufficient capacity in the Availability Zone you requested" }, "metadata": { + "nsdInfoId": "np-0d0f3e2eae4fc1ac1", "createdAt": "2022-06-10T19:48:33Z", "lastModified": "2022-06-10T19:48:33Z" } } ] } + }, + { + "title": "List Sol Network Update operations", + "input": { + "nsInstanceId": "ni-0d5b823eb5c2a9241" + }, + "output": { + "networkOperations": [ + { + "id": "no-0d5b823eb5c2a9241", + "arn": "arn:aws:tnb:us-west-2:123456789000:network-operation/no-0d5b823eb5c2a9241", + "nsInstanceId": "ni-0d5b823eb5c2a9241", + "operationState": "COMPLETED", + "lcmOperationType": "UPDATE", + "updateType": "MODIFY_VNF_INFORMATION", + "metadata": { + "vnfInstanceId": "fi-0d5b823eb5c2a9241", + "createdAt": "2022-06-10T19:48:34Z", + "lastModified": "2022-06-10T21:48:33Z" + } + } + ] + } + }, + { + "title": "List Sol Network Update operations", + "input": { + "nsInstanceId": "ni-0d5b823eb5c2a9241" + }, + "output": { + "networkOperations": [ + { + "id": "no-0d5b823eb5c2a9241", + "arn": "arn:aws:tnb:us-west-2:123456789000:network-operation/no-0d5b823eb5c2a9241", + "nsInstanceId": "ni-0d5b823eb5c2a9241", + "operationState": "COMPLETED", + "lcmOperationType": "UPDATE", + "updateType": "UPDATE_NS", + "metadata": { + "nsdInfoId": "np-0d0f3e2eae4fc1ac1", + "createdAt": "2022-06-10T19:48:34Z", + "lastModified": "2022-06-10T21:48:33Z" + } + } + ] + } } ], "smithy.api#http": { @@ -3151,6 +3323,12 @@ "smithy.api#required": {} } }, + "updateType": { + "target": "com.amazonaws.tnb#UpdateSolNetworkType", + "traits": { + 
"smithy.api#documentation": "

Type of the update. Only present if the network operation lcmOperationType is UPDATE.

" + } + }, "error": { "target": "com.amazonaws.tnb#ProblemDetails", "traits": { @@ -3171,6 +3349,13 @@ "com.amazonaws.tnb#ListSolNetworkOperationsInput": { "type": "structure", "members": { + "nsInstanceId": { + "target": "com.amazonaws.tnb#NsInstanceId", + "traits": { + "smithy.api#documentation": "

Network instance id filter, to retrieve network operations associated with a network instance.

", + "smithy.api#httpQuery": "nsInstanceId" + } + }, "maxResults": { "target": "smithy.api#Integer", "traits": { @@ -3197,6 +3382,18 @@ "com.amazonaws.tnb#ListSolNetworkOperationsMetadata": { "type": "structure", "members": { + "nsdInfoId": { + "target": "com.amazonaws.tnb#NsdInfoId", + "traits": { + "smithy.api#documentation": "

The network service descriptor id used for the operation.

\n

Only present if the updateType is UPDATE_NS.

" + } + }, + "vnfInstanceId": { + "target": "com.amazonaws.tnb#VnfInstanceId", + "traits": { + "smithy.api#documentation": "

The network function id used for the operation.

\n

Only present if the updateType is MODIFY_VNF_INFORMATION.

" + } + }, "createdAt": { "target": "smithy.api#Timestamp", "traits": { @@ -3315,7 +3512,7 @@ "vnfPkgIds": { "target": "com.amazonaws.tnb#VnfPkgIdList", "traits": { - "smithy.api#documentation": "

Identifies the function package for the function package descriptor referenced by the onboarded network package.

" + "smithy.api#documentation": "

Identifies the function package for the function package descriptor referenced by the\n onboarded network package.

" } }, "metadata": { @@ -3593,6 +3790,28 @@ "smithy.api#output": {} } }, + "com.amazonaws.tnb#ModifyVnfInfoMetadata": { + "type": "structure", + "members": { + "vnfInstanceId": { + "target": "com.amazonaws.tnb#VnfInstanceId", + "traits": { + "smithy.api#documentation": "

The network function instance that was updated in the network instance.

", + "smithy.api#required": {} + } + }, + "vnfConfigurableProperties": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

The configurable properties used during update of the network function instance.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Metadata related to the configuration properties used during update of a specific\n network function in a network instance.

" + } + }, "com.amazonaws.tnb#NetworkArtifactMeta": { "type": "structure", "members": { @@ -3681,12 +3900,24 @@ "smithy.api#enumValue": "NOT_INSTANTIATED" } }, + "UPDATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATED" + } + }, "IMPAIRED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "IMPAIRED" } }, + "UPDATE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATE_FAILED" + } + }, "STOPPED": { "target": "smithy.api#Unit", "traits": { @@ -3705,6 +3936,12 @@ "smithy.api#enumValue": "INSTANTIATE_IN_PROGRESS" } }, + "INTENT_TO_UPDATE_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTENT_TO_UPDATE_IN_PROGRESS" + } + }, "UPDATE_IN_PROGRESS": { "target": "smithy.api#Unit", "traits": { @@ -3951,7 +4188,7 @@ } }, "file": { - "target": "smithy.api#Blob", + "target": "com.amazonaws.tnb#SensitiveBlob", "traits": { "smithy.api#documentation": "

Function package file.

", "smithy.api#httpPayload": {}, @@ -4112,7 +4349,7 @@ } }, "file": { - "target": "smithy.api#Blob", + "target": "com.amazonaws.tnb#SensitiveBlob", "traits": { "smithy.api#documentation": "

Network package file.

", "smithy.api#httpPayload": {}, @@ -4208,6 +4445,12 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.tnb#SensitiveBlob": { + "type": "blob", + "traits": { + "smithy.api#sensitive": {} + } + }, "com.amazonaws.tnb#ServiceQuotaExceededException": { "type": "structure", "members": { @@ -4363,7 +4606,7 @@ ], "maxAge": 86400 }, - "smithy.api#documentation": "

Amazon Web Services Telco Network Builder (TNB) is a network automation service that helps you deploy and manage telecom networks. AWS TNB helps you with the lifecycle management of your telecommunication network functions throughout planning, deployment, and post-deployment activities.

", + "smithy.api#documentation": "

\n Amazon Web Services Telco Network Builder (TNB) is a network automation service that helps\n you deploy and manage telecom networks. AWS TNB helps you with the lifecycle management of\n your telecommunication network functions throughout planning, deployment, and\n post-deployment activities.

", "smithy.api#title": "AWS Telco Network Builder", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -4407,7 +4650,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4426,7 +4668,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -4454,13 +4695,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -4473,7 +4715,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4487,7 +4728,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4510,7 +4750,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4545,11 +4784,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4560,16 +4797,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -4583,14 +4823,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -4599,15 +4837,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4618,16 +4855,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -4641,7 +4881,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4661,11 +4900,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4676,20 +4913,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { 
"conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4700,18 +4939,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] }, @@ -5259,7 +5502,7 @@ "tags": { "target": "com.amazonaws.tnb#TagMap", "traits": { - "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" } } }, @@ -5279,7 +5522,7 @@ "tags": { "target": "com.amazonaws.tnb#TagMap", "traits": { - "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" } } }, @@ -5388,6 +5631,27 @@ "smithy.api#output": {} } }, + "com.amazonaws.tnb#UpdateNsMetadata": { + "type": "structure", + "members": { + "nsdInfoId": { + "target": "com.amazonaws.tnb#NsdInfoId", + "traits": { + "smithy.api#documentation": "

The network service descriptor used for updating the network instance.

", + "smithy.api#required": {} + } + }, + "additionalParamsForNs": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

The configurable properties used during update.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Metadata related to the configuration properties used during update of a network instance.

" + } + }, "com.amazonaws.tnb#UpdateSolFunctionPackage": { "type": "operation", "input": { @@ -5510,7 +5774,7 @@ } ], "traits": { - "smithy.api#documentation": "

Update a network instance.

\n

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.

", + "smithy.api#documentation": "

Update a network instance.

\n

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.

\n

Choose the updateType parameter to target the necessary update of the network instance.

", "smithy.api#examples": [ { "title": "Update a Sol Network Instance", @@ -5534,6 +5798,29 @@ "Name": "Resource" } } + }, + { + "title": "Update a Sol Network Instance", + "input": { + "nsInstanceId": "ni-0d5b823eb5c2a9241", + "updateType": "UPDATE_NS", + "updateNs": { + "nsdInfoId": "np-0d5b823eb5c2a9241", + "additionalParamsForNs": { + "cidr_block": "10.0.0.0/16", + "availability_zone": "us-west-2a" + } + }, + "tags": { + "Name": "Resource" + } + }, + "output": { + "nsLcmOpOccId": "no-0d5b823eb5c2a9241", + "tags": { + "Name": "Resource" + } + } } ], "smithy.api#http": { @@ -5557,20 +5844,26 @@ "updateType": { "target": "com.amazonaws.tnb#UpdateSolNetworkType", "traits": { - "smithy.api#documentation": "

The type of update.

", + "smithy.api#documentation": "

The type of update.

\n
    \n
  • \n

    Use the MODIFY_VNF_INFORMATION update type, to update a specific network function\n configuration, in the network instance.

    \n
  • \n
  • \n

    Use the UPDATE_NS update type, to update the network instance to a\n new network service descriptor.

    \n
  • \n
", "smithy.api#required": {} } }, "modifyVnfInfoData": { "target": "com.amazonaws.tnb#UpdateSolNetworkModify", "traits": { - "smithy.api#documentation": "

Identifies the network function information parameters and/or the configurable properties of the network function to be modified.

" + "smithy.api#documentation": "

Identifies the network function information parameters and/or the configurable\n properties of the network function to be modified.

\n

Include this property only if the update type is MODIFY_VNF_INFORMATION.

" + } + }, + "updateNs": { + "target": "com.amazonaws.tnb#UpdateSolNetworkServiceData", + "traits": { + "smithy.api#documentation": "

Identifies the network service descriptor and the configurable\n properties of the descriptor, to be used for the update.

\n

Include this property only if the update type is UPDATE_NS.

" } }, "tags": { "target": "com.amazonaws.tnb#TagMap", "traits": { - "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" } } }, @@ -5590,7 +5883,7 @@ "tags": { "target": "com.amazonaws.tnb#TagMap", "traits": { - "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "smithy.api#documentation": "

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" } } }, @@ -5611,7 +5904,7 @@ "vnfConfigurableProperties": { "target": "smithy.api#Document", "traits": { - "smithy.api#documentation": "

Provides values for the configurable properties declared in the function package descriptor.

", + "smithy.api#documentation": "

Provides values for the configurable properties declared in the function package\n descriptor.

", "smithy.api#required": {} } } @@ -5713,6 +6006,27 @@ "smithy.api#output": {} } }, + "com.amazonaws.tnb#UpdateSolNetworkServiceData": { + "type": "structure", + "members": { + "nsdInfoId": { + "target": "com.amazonaws.tnb#NsdInfoId", + "traits": { + "smithy.api#documentation": "

ID of the network service descriptor.

", + "smithy.api#required": {} + } + }, + "additionalParamsForNs": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

Values for the configurable properties declared in the network service descriptor.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information parameters and/or the configurable properties for a network descriptor used for update.

" + } + }, "com.amazonaws.tnb#UpdateSolNetworkType": { "type": "enum", "members": { @@ -5721,6 +6035,12 @@ "traits": { "smithy.api#enumValue": "MODIFY_VNF_INFORMATION" } + }, + "UPDATE_NS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATE_NS" + } } } }, @@ -5767,7 +6087,7 @@ } ], "traits": { - "smithy.api#documentation": "

Validates function package content. This can be used as a dry run before uploading function package content with PutSolFunctionPackageContent.

\n

A function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network.

", + "smithy.api#documentation": "

Validates function package content. This can be used as a dry run before uploading\n function package content with PutSolFunctionPackageContent.

\n

A function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network.

", "smithy.api#examples": [ { "title": "Validate a Function Package content", @@ -5817,7 +6137,7 @@ } }, "file": { - "target": "smithy.api#Blob", + "target": "com.amazonaws.tnb#SensitiveBlob", "traits": { "smithy.api#documentation": "

Function package file.

", "smithy.api#httpPayload": {}, @@ -5916,7 +6236,7 @@ } ], "traits": { - "smithy.api#documentation": "

Validates network package content. This can be used as a dry run before uploading network package content with PutSolNetworkPackageContent.

\n

A network package is a .zip file in CSAR (Cloud Service Archive) format that defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on.

", + "smithy.api#documentation": "

Validates network package content. This can be used as a dry run before uploading\n network package content with PutSolNetworkPackageContent.

\n

A network package is a .zip file in CSAR (Cloud Service Archive) format that defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on.

", "smithy.api#examples": [ { "title": "Validate the network package content of a NSD archive", @@ -5978,7 +6298,7 @@ } }, "file": { - "target": "smithy.api#Blob", + "target": "com.amazonaws.tnb#SensitiveBlob", "traits": { "smithy.api#documentation": "

Network package file.

", "smithy.api#httpPayload": {}, @@ -6069,7 +6389,7 @@ } }, "traits": { - "smithy.api#documentation": "

Unable to process the request because the client provided input failed to satisfy request constraints.

", + "smithy.api#documentation": "

Unable to process the request because the client provided input failed to satisfy\n request constraints.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } diff --git a/models/waf-regional.json b/models/waf-regional.json index 7e1900e027..dc6128bc42 100644 --- a/models/waf-regional.json +++ b/models/waf-regional.json @@ -7937,6 +7937,21 @@ ] } } + ], + "smithy.test#smokeTests": [ + { + "id": "ListRulesSuccess", + "params": { + "Limit": 20 + }, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } ] } }, diff --git a/models/workspaces.json b/models/workspaces.json index 9626704585..bf79144a7d 100644 --- a/models/workspaces.json +++ b/models/workspaces.json @@ -1076,7 +1076,7 @@ "DesiredUserSessions": { "target": "com.amazonaws.workspaces#DesiredUserSessions", "traits": { - "smithy.api#documentation": "

The desired number of user sessions for a multi-session pool. \n This is not allowed for single-session pools.

", + "smithy.api#documentation": "

The desired number of user sessions for the WorkSpaces in the pool.

", "smithy.api#required": {} } } @@ -1091,7 +1091,7 @@ "AvailableUserSessions": { "target": "com.amazonaws.workspaces#AvailableUserSessions", "traits": { - "smithy.api#documentation": "

The number of user sessions currently being used for pool sessions. This only applies to multi-session pools.

", + "smithy.api#documentation": "

The number of user sessions currently available for streaming from your pool.

\n

AvailableUserSessions = ActualUserSessions - ActiveUserSessions

", "smithy.api#required": {} } }, @@ -1105,14 +1105,14 @@ "ActualUserSessions": { "target": "com.amazonaws.workspaces#ActualUserSessions", "traits": { - "smithy.api#documentation": "

The total number of session slots that are available for a pool of WorkSpaces.

", + "smithy.api#documentation": "

The total number of user sessions that are available for streaming or are currently \n streaming in your pool.

\n

ActualUserSessions = AvailableUserSessions + ActiveUserSessions

", "smithy.api#required": {} } }, "ActiveUserSessions": { "target": "com.amazonaws.workspaces#ActiveUserSessions", "traits": { - "smithy.api#documentation": "

The number of user sessions currently being used for pool sessions. This only applies to multi-session pools.

", + "smithy.api#documentation": "

The number of user sessions currently being used for your pool.

", "smithy.api#required": {} } } @@ -2478,7 +2478,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates one or more WorkSpaces.

\n

This operation is asynchronous and returns before the WorkSpaces are created.

\n \n
    \n
  • \n

    The MANUAL running mode value is only supported by Amazon WorkSpaces\n Core. Contact your account team to be allow-listed to use this value. For more\n information, see Amazon WorkSpaces\n Core.

    \n
  • \n
  • \n

    You don't need to specify the PCOIP protocol for Linux bundles\n because WSP is the default protocol for those bundles.

    \n
  • \n
  • \n

    User-decoupled WorkSpaces are only supported by Amazon WorkSpaces\n Core.

    \n
  • \n
\n
" + "smithy.api#documentation": "

Creates one or more WorkSpaces.

\n

This operation is asynchronous and returns before the WorkSpaces are created.

\n \n
    \n
  • \n

    The MANUAL running mode value is only supported by Amazon WorkSpaces\n Core. Contact your account team to be allow-listed to use this value. For more\n information, see Amazon WorkSpaces\n Core.

    \n
  • \n
  • \n

    You don't need to specify the PCOIP protocol for Linux bundles\n because WSP is the default protocol for those bundles.

    \n
  • \n
  • \n

    User-decoupled WorkSpaces are only supported by Amazon WorkSpaces\n Core.

    \n
  • \n
  • \n

    Review your running mode to ensure you are using one that is optimal for your needs and budget.\n For more information on switching running modes, see \n \n Can I switch between hourly and monthly billing?\n

    \n
  • \n
\n
" } }, "com.amazonaws.workspaces#CreateWorkspacesPool": { @@ -5915,7 +5915,7 @@ "Applications": { "target": "com.amazonaws.workspaces#ApplicationList", "traits": { - "smithy.api#documentation": "

If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11\n BYOL images. For more information about subscribing to Office for BYOL images, see Bring\n Your Own Windows Desktop Licenses.

\n \n
    \n
  • \n

    Although this parameter is an array, only one item is allowed at this time.

    \n
  • \n
  • \n

    Windows 11 only supports Microsoft_Office_2019.

    \n
  • \n
\n
" + "smithy.api#documentation": "

If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11\n BYOL images. For more information about subscribing to Office for BYOL images, see Bring\n Your Own Windows Desktop Licenses.

\n \n
    \n
  • \n

    Although this parameter is an array, only one item is allowed at this\n time.

    \n
  • \n
  • \n

    During the image import process, non-GPU WSP WorkSpaces with Windows 11 support\n only Microsoft_Office_2019. GPU WSP WorkSpaces with Windows 11 do not\n support Office installation.

    \n
  • \n
\n
" } } }, @@ -10711,6 +10711,12 @@ "smithy.api#enumValue": "BYOL_REGULAR_WSP" } }, + "BYOL_GRAPHICS_G4DN_WSP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BYOL_GRAPHICS_G4DN_WSP" + } + }, "BYOL_REGULAR_BYOP": { "target": "smithy.api#Unit", "traits": { @@ -10799,7 +10805,7 @@ "RunningMode": { "target": "com.amazonaws.workspaces#RunningMode", "traits": { - "smithy.api#documentation": "

The running mode. For more information, see Manage the WorkSpace Running\n Mode.

\n \n

The MANUAL value is only supported by Amazon WorkSpaces Core. Contact\n your account team to be allow-listed to use this value. For more information, see\n Amazon WorkSpaces Core.

\n
" + "smithy.api#documentation": "

The running mode. For more information, see Manage the WorkSpace Running\n Mode.

\n \n

The MANUAL value is only supported by Amazon WorkSpaces Core. Contact\n your account team to be allow-listed to use this value. For more information, see\n Amazon WorkSpaces Core.

\n
\n

Review your running mode to ensure you are using one that is optimal for your needs and\n budget. For more information on switching running modes, see Can I switch between hourly and monthly billing?\n

" } }, "RunningModeAutoStopTimeoutInMinutes": {