diff --git a/Package.swift b/Package.swift
index a469afdfc1..f3132390b2 100644
--- a/Package.swift
+++ b/Package.swift
@@ -276,7 +276,6 @@ let package = Package(
.library(name: "SotoMigrationHubOrchestrator", targets: ["SotoMigrationHubOrchestrator"]),
.library(name: "SotoMigrationHubRefactorSpaces", targets: ["SotoMigrationHubRefactorSpaces"]),
.library(name: "SotoMigrationHubStrategy", targets: ["SotoMigrationHubStrategy"]),
- .library(name: "SotoMobile", targets: ["SotoMobile"]),
.library(name: "SotoNeptune", targets: ["SotoNeptune"]),
.library(name: "SotoNeptuneGraph", targets: ["SotoNeptuneGraph"]),
.library(name: "SotoNeptunedata", targets: ["SotoNeptunedata"]),
@@ -352,6 +351,7 @@ let package = Package(
.library(name: "SotoSSM", targets: ["SotoSSM"]),
.library(name: "SotoSSMContacts", targets: ["SotoSSMContacts"]),
.library(name: "SotoSSMIncidents", targets: ["SotoSSMIncidents"]),
+ .library(name: "SotoSSMQuickSetup", targets: ["SotoSSMQuickSetup"]),
.library(name: "SotoSSO", targets: ["SotoSSO"]),
.library(name: "SotoSSOAdmin", targets: ["SotoSSOAdmin"]),
.library(name: "SotoSSOOIDC", targets: ["SotoSSOOIDC"]),
@@ -666,7 +666,6 @@ let package = Package(
.target(name: "SotoMigrationHubOrchestrator", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MigrationHubOrchestrator", swiftSettings: swiftSettings),
.target(name: "SotoMigrationHubRefactorSpaces", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MigrationHubRefactorSpaces", swiftSettings: swiftSettings),
.target(name: "SotoMigrationHubStrategy", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MigrationHubStrategy", swiftSettings: swiftSettings),
- .target(name: "SotoMobile", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Mobile", swiftSettings: swiftSettings),
.target(name: "SotoNeptune", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Neptune", swiftSettings: swiftSettings),
.target(name: "SotoNeptuneGraph", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/NeptuneGraph", swiftSettings: swiftSettings),
.target(name: "SotoNeptunedata", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Neptunedata", swiftSettings: swiftSettings),
@@ -742,6 +741,7 @@ let package = Package(
.target(name: "SotoSSM", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSM", swiftSettings: swiftSettings),
.target(name: "SotoSSMContacts", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSMContacts", swiftSettings: swiftSettings),
.target(name: "SotoSSMIncidents", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSMIncidents", swiftSettings: swiftSettings),
+ .target(name: "SotoSSMQuickSetup", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSMQuickSetup", swiftSettings: swiftSettings),
.target(name: "SotoSSO", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSO", swiftSettings: swiftSettings),
.target(name: "SotoSSOAdmin", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSOAdmin", swiftSettings: swiftSettings),
.target(name: "SotoSSOOIDC", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SSOOIDC", swiftSettings: swiftSettings),
diff --git a/Sources/Soto/Services/Amplify/Amplify_shapes.swift b/Sources/Soto/Services/Amplify/Amplify_shapes.swift
index ced72f282a..517209f3c6 100644
--- a/Sources/Soto/Services/Amplify/Amplify_shapes.swift
+++ b/Sources/Soto/Services/Amplify/Amplify_shapes.swift
@@ -26,6 +26,12 @@ import Foundation
extension Amplify {
// MARK: Enums
+ public enum CacheConfigType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+ case amplifyManaged = "AMPLIFY_MANAGED"
+ case amplifyManagedNoCookies = "AMPLIFY_MANAGED_NO_COOKIES"
+ public var description: String { return self.rawValue }
+ }
+
public enum CertificateType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
case amplifyManaged = "AMPLIFY_MANAGED"
case custom = "CUSTOM"
@@ -114,6 +120,8 @@ extension Amplify {
public let basicAuthCredentials: String?
/// Describes the content of the build specification (build spec) for the Amplify app.
public let buildSpec: String?
+ /// The cache configuration for the Amplify app. If you don't specify the cache configuration type, Amplify uses the default AMPLIFY_MANAGED setting.
+ public let cacheConfig: CacheConfig?
/// Creates a date and time for the Amplify app.
public let createTime: Date
/// Describes the custom HTTP headers for the Amplify app.
@@ -151,13 +159,14 @@ extension Amplify {
/// Updates the date and time for the Amplify app.
public let updateTime: Date
- public init(appArn: String, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, createTime: Date, customHeaders: String? = nil, customRules: [CustomRule]? = nil, defaultDomain: String, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool, enableBranchAutoBuild: Bool, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String, platform: Platform, productionBranch: ProductionBranch? = nil, repository: String? = nil, repositoryCloneMethod: RepositoryCloneMethod? = nil, tags: [String: String]? = nil, updateTime: Date) {
+ public init(appArn: String, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, cacheConfig: CacheConfig? = nil, createTime: Date, customHeaders: String? = nil, customRules: [CustomRule]? = nil, defaultDomain: String, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool, enableBranchAutoBuild: Bool, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String, platform: Platform, productionBranch: ProductionBranch? = nil, repository: String? = nil, repositoryCloneMethod: RepositoryCloneMethod? = nil, tags: [String: String]? = nil, updateTime: Date) {
self.appArn = appArn
self.appId = appId
self.autoBranchCreationConfig = autoBranchCreationConfig
self.autoBranchCreationPatterns = autoBranchCreationPatterns
self.basicAuthCredentials = basicAuthCredentials
self.buildSpec = buildSpec
+ self.cacheConfig = cacheConfig
self.createTime = createTime
self.customHeaders = customHeaders
self.customRules = customRules
@@ -185,6 +194,7 @@ extension Amplify {
case autoBranchCreationPatterns = "autoBranchCreationPatterns"
case basicAuthCredentials = "basicAuthCredentials"
case buildSpec = "buildSpec"
+ case cacheConfig = "cacheConfig"
case createTime = "createTime"
case customHeaders = "customHeaders"
case customRules = "customRules"
@@ -462,12 +472,25 @@ extension Amplify {
}
}
+ public struct CacheConfig: AWSEncodableShape & AWSDecodableShape {
+ /// The type of cache configuration to use for an Amplify app. The AMPLIFY_MANAGED cache configuration automatically applies an optimized cache configuration for your app based on its platform, routing rules, and rewrite rules. This is the default setting. The AMPLIFY_MANAGED_NO_COOKIES cache configuration type is the same as AMPLIFY_MANAGED, except that it excludes all cookies from the cache key.
+ public let type: CacheConfigType
+
+ public init(type: CacheConfigType) {
+ self.type = type
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case type = "type"
+ }
+ }
+
public struct Certificate: AWSDecodableShape {
/// The DNS record for certificate verification.
public let certificateVerificationDNSRecord: String?
/// The Amazon resource name (ARN) for a custom certificate that you have already added to Certificate Manager in your Amazon Web Services account. This field is required only when the certificate type is CUSTOM.
public let customCertificateArn: String?
- /// The type of SSL/TLS certificate that you want to use. Specify AMPLIFY_MANAGED to use the default certificate that Amplify provisions for you. Specify CUSTOM to use your own certificate that you have already added to Certificate Manager in your Amazon Web Services account. Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see Importing certificates into Certificate Manager in the ACM User guide .
+ /// The type of SSL/TLS certificate that you want to use. Specify AMPLIFY_MANAGED to use the default certificate that Amplify provisions for you. Specify CUSTOM to use your own certificate that you have already added to Certificate Manager in your Amazon Web Services account. Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see Importing certificates into Certificate Manager in the ACM User guide.
public let type: CertificateType
public init(certificateVerificationDNSRecord: String? = nil, customCertificateArn: String? = nil, type: CertificateType) {
@@ -516,6 +539,8 @@ extension Amplify {
public let basicAuthCredentials: String?
/// The build specification (build spec) for an Amplify app.
public let buildSpec: String?
+ /// The cache configuration for the Amplify app.
+ public let cacheConfig: CacheConfig?
/// The custom HTTP headers for an Amplify app.
public let customHeaders: String?
/// The custom rewrite and redirect rules for an Amplify app.
@@ -545,12 +570,13 @@ extension Amplify {
/// The tag for an Amplify app.
public let tags: [String: String]?
- public init(accessToken: String? = nil, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, customHeaders: String? = nil, customRules: [CustomRule]? = nil, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool? = nil, enableBranchAutoBuild: Bool? = nil, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String, oauthToken: String? = nil, platform: Platform? = nil, repository: String? = nil, tags: [String: String]? = nil) {
+ public init(accessToken: String? = nil, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, cacheConfig: CacheConfig? = nil, customHeaders: String? = nil, customRules: [CustomRule]? = nil, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool? = nil, enableBranchAutoBuild: Bool? = nil, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String, oauthToken: String? = nil, platform: Platform? = nil, repository: String? = nil, tags: [String: String]? = nil) {
self.accessToken = accessToken
self.autoBranchCreationConfig = autoBranchCreationConfig
self.autoBranchCreationPatterns = autoBranchCreationPatterns
self.basicAuthCredentials = basicAuthCredentials
self.buildSpec = buildSpec
+ self.cacheConfig = cacheConfig
self.customHeaders = customHeaders
self.customRules = customRules
self.description = description
@@ -620,6 +646,7 @@ extension Amplify {
case autoBranchCreationPatterns = "autoBranchCreationPatterns"
case basicAuthCredentials = "basicAuthCredentials"
case buildSpec = "buildSpec"
+ case cacheConfig = "cacheConfig"
case customHeaders = "customHeaders"
case customRules = "customRules"
case description = "description"
@@ -2627,6 +2654,8 @@ extension Amplify {
public let basicAuthCredentials: String?
/// The build specification (build spec) for an Amplify app.
public let buildSpec: String?
+ /// The cache configuration for the Amplify app.
+ public let cacheConfig: CacheConfig?
/// The custom HTTP headers for an Amplify app.
public let customHeaders: String?
/// The custom redirect and rewrite rules for an Amplify app.
@@ -2654,13 +2683,14 @@ extension Amplify {
/// The name of the Git repository for an Amplify app.
public let repository: String?
- public init(accessToken: String? = nil, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, customHeaders: String? = nil, customRules: [CustomRule]? = nil, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool? = nil, enableBranchAutoBuild: Bool? = nil, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String? = nil, oauthToken: String? = nil, platform: Platform? = nil, repository: String? = nil) {
+ public init(accessToken: String? = nil, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, cacheConfig: CacheConfig? = nil, customHeaders: String? = nil, customRules: [CustomRule]? = nil, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool? = nil, enableBranchAutoBuild: Bool? = nil, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String? = nil, oauthToken: String? = nil, platform: Platform? = nil, repository: String? = nil) {
self.accessToken = accessToken
self.appId = appId
self.autoBranchCreationConfig = autoBranchCreationConfig
self.autoBranchCreationPatterns = autoBranchCreationPatterns
self.basicAuthCredentials = basicAuthCredentials
self.buildSpec = buildSpec
+ self.cacheConfig = cacheConfig
self.customHeaders = customHeaders
self.customRules = customRules
self.description = description
@@ -2685,6 +2715,7 @@ extension Amplify {
try container.encodeIfPresent(self.autoBranchCreationPatterns, forKey: .autoBranchCreationPatterns)
try container.encodeIfPresent(self.basicAuthCredentials, forKey: .basicAuthCredentials)
try container.encodeIfPresent(self.buildSpec, forKey: .buildSpec)
+ try container.encodeIfPresent(self.cacheConfig, forKey: .cacheConfig)
try container.encodeIfPresent(self.customHeaders, forKey: .customHeaders)
try container.encodeIfPresent(self.customRules, forKey: .customRules)
try container.encodeIfPresent(self.description, forKey: .description)
@@ -2748,6 +2779,7 @@ extension Amplify {
case autoBranchCreationPatterns = "autoBranchCreationPatterns"
case basicAuthCredentials = "basicAuthCredentials"
case buildSpec = "buildSpec"
+ case cacheConfig = "cacheConfig"
case customHeaders = "customHeaders"
case customRules = "customRules"
case description = "description"
diff --git a/Sources/Soto/Services/AppIntegrations/AppIntegrations_api.swift b/Sources/Soto/Services/AppIntegrations/AppIntegrations_api.swift
index 67d3c42e31..edf70456ee 100644
--- a/Sources/Soto/Services/AppIntegrations/AppIntegrations_api.swift
+++ b/Sources/Soto/Services/AppIntegrations/AppIntegrations_api.swift
@@ -19,7 +19,7 @@
/// Service object for interacting with AWS AppIntegrations service.
///
-/// The Amazon AppIntegrations service enables you to configure and reuse connections to external applications. For information about how you can use external applications with Amazon Connect, see Set up pre-built integrations and Deliver information to agents using Amazon Connect Wisdom in the Amazon Connect Administrator Guide.
+/// Amazon AppIntegrations actions Amazon AppIntegrations data types The Amazon AppIntegrations service enables you to configure and reuse connections to external applications. For information about how you can use external applications with Amazon Connect, see the following topics in the Amazon Connect Administrator Guide: Third-party applications (3p apps) in the agent workspace Use Amazon Q in Connect for generative AI–powered agent assistance in real-time
public struct AppIntegrations: AWSService {
// MARK: Member variables
@@ -73,7 +73,7 @@ public struct AppIntegrations: AWSService {
// MARK: API Calls
- /// This API is in preview release and subject to change. Creates and persists an Application resource.
+ /// Creates and persists an Application resource.
@Sendable
public func createApplication(_ input: CreateApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateApplicationResponse {
return try await self.client.execute(
@@ -99,6 +99,19 @@ public struct AppIntegrations: AWSService {
)
}
+ /// Creates and persists a DataIntegrationAssociation resource.
+ @Sendable
+ public func createDataIntegrationAssociation(_ input: CreateDataIntegrationAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataIntegrationAssociationResponse {
+ return try await self.client.execute(
+ operation: "CreateDataIntegrationAssociation",
+ path: "/dataIntegrations/{DataIntegrationIdentifier}/associations",
+ httpMethod: .POST,
+ serviceConfig: self.config,
+ input: input,
+ logger: logger
+ )
+ }
+
/// Creates an EventIntegration, given a specified name, description, and a reference to an Amazon EventBridge bus in your account and a partner event source that pushes events to that bus. No objects are created in the your account, only metadata that is persisted on the EventIntegration control plane.
@Sendable
public func createEventIntegration(_ input: CreateEventIntegrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEventIntegrationResponse {
@@ -153,7 +166,7 @@ public struct AppIntegrations: AWSService {
)
}
- /// This API is in preview release and subject to change. Get an Application resource.
+ /// Get an Application resource.
@Sendable
public func getApplication(_ input: GetApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetApplicationResponse {
return try await self.client.execute(
@@ -207,7 +220,7 @@ public struct AppIntegrations: AWSService {
)
}
- /// This API is in preview release and subject to change. Lists applications in the account.
+ /// Lists applications in the account.
@Sendable
public func listApplications(_ input: ListApplicationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListApplicationsResponse {
return try await self.client.execute(
@@ -315,7 +328,7 @@ public struct AppIntegrations: AWSService {
)
}
- /// This API is in preview release and subject to change. Updates and persists an Application resource.
+ /// Updates and persists an Application resource.
@Sendable
public func updateApplication(_ input: UpdateApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateApplicationResponse {
return try await self.client.execute(
@@ -343,6 +356,19 @@ public struct AppIntegrations: AWSService {
)
}
+ /// Updates and persists a DataIntegrationAssociation resource. Updating a DataIntegrationAssociation with ExecutionConfiguration will rerun the on-demand job.
+ @Sendable
+ public func updateDataIntegrationAssociation(_ input: UpdateDataIntegrationAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateDataIntegrationAssociationResponse {
+ return try await self.client.execute(
+ operation: "UpdateDataIntegrationAssociation",
+ path: "/dataIntegrations/{DataIntegrationIdentifier}/associations/{DataIntegrationAssociationIdentifier}",
+ httpMethod: .PATCH,
+ serviceConfig: self.config,
+ input: input,
+ logger: logger
+ )
+ }
+
/// Updates the description of an event integration.
@Sendable
public func updateEventIntegration(_ input: UpdateEventIntegrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEventIntegrationResponse {
@@ -389,7 +415,7 @@ extension AppIntegrations {
)
}
- /// This API is in preview release and subject to change. Lists applications in the account.
+ /// Lists applications in the account.
/// Return PaginatorSequence for operation.
///
/// - Parameters:
diff --git a/Sources/Soto/Services/AppIntegrations/AppIntegrations_shapes.swift b/Sources/Soto/Services/AppIntegrations/AppIntegrations_shapes.swift
index f2944e9d97..49eac486c3 100644
--- a/Sources/Soto/Services/AppIntegrations/AppIntegrations_shapes.swift
+++ b/Sources/Soto/Services/AppIntegrations/AppIntegrations_shapes.swift
@@ -26,6 +26,19 @@ import Foundation
extension AppIntegrations {
// MARK: Enums
+ public enum ExecutionMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+ case onDemand = "ON_DEMAND"
+ case scheduled = "SCHEDULED"
+ public var description: String { return self.rawValue }
+ }
+
+ public enum ExecutionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+ case completed = "COMPLETED"
+ case failed = "FAILED"
+ case inProgress = "IN_PROGRESS"
+ public var description: String { return self.rawValue }
+ }
+
// MARK: Shapes
public struct ApplicationAssociationSummary: AWSDecodableShape {
@@ -211,6 +224,99 @@ extension AppIntegrations {
}
}
+ public struct CreateDataIntegrationAssociationRequest: AWSEncodableShape {
+ /// The mapping of metadata to be extracted from the data.
+ public let clientAssociationMetadata: [String: String]?
+ /// The identifier for the client that is associated with the DataIntegration association.
+ public let clientId: String?
+ /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs.
+ public let clientToken: String?
+ /// A unique identifier for the DataIntegration.
+ public let dataIntegrationIdentifier: String
+ /// The URI of the data destination.
+ public let destinationURI: String?
+ /// The configuration for how the files should be pulled from the source.
+ public let executionConfiguration: ExecutionConfiguration?
+ public let objectConfiguration: [String: [String: [String]]]?
+
+ public init(clientAssociationMetadata: [String: String]? = nil, clientId: String? = nil, clientToken: String? = CreateDataIntegrationAssociationRequest.idempotencyToken(), dataIntegrationIdentifier: String, destinationURI: String? = nil, executionConfiguration: ExecutionConfiguration? = nil, objectConfiguration: [String: [String: [String]]]? = nil) {
+ self.clientAssociationMetadata = clientAssociationMetadata
+ self.clientId = clientId
+ self.clientToken = clientToken
+ self.dataIntegrationIdentifier = dataIntegrationIdentifier
+ self.destinationURI = destinationURI
+ self.executionConfiguration = executionConfiguration
+ self.objectConfiguration = objectConfiguration
+ }
+
+ public func encode(to encoder: Encoder) throws {
+ let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
+ var container = encoder.container(keyedBy: CodingKeys.self)
+ try container.encodeIfPresent(self.clientAssociationMetadata, forKey: .clientAssociationMetadata)
+ try container.encodeIfPresent(self.clientId, forKey: .clientId)
+ try container.encodeIfPresent(self.clientToken, forKey: .clientToken)
+ request.encodePath(self.dataIntegrationIdentifier, key: "DataIntegrationIdentifier")
+ try container.encodeIfPresent(self.destinationURI, forKey: .destinationURI)
+ try container.encodeIfPresent(self.executionConfiguration, forKey: .executionConfiguration)
+ try container.encodeIfPresent(self.objectConfiguration, forKey: .objectConfiguration)
+ }
+
+ public func validate(name: String) throws {
+ try self.clientAssociationMetadata?.forEach {
+ try validate($0.key, name: "clientAssociationMetadata.key", parent: name, max: 255)
+ try validate($0.key, name: "clientAssociationMetadata.key", parent: name, min: 1)
+ try validate($0.key, name: "clientAssociationMetadata.key", parent: name, pattern: "\\S")
+ try validate($0.value, name: "clientAssociationMetadata[\"\($0.key)\"]", parent: name, max: 255)
+ try validate($0.value, name: "clientAssociationMetadata[\"\($0.key)\"]", parent: name, min: 1)
+ try validate($0.value, name: "clientAssociationMetadata[\"\($0.key)\"]", parent: name, pattern: "\\S")
+ }
+ try self.validate(self.clientId, name: "clientId", parent: name, max: 255)
+ try self.validate(self.clientId, name: "clientId", parent: name, min: 1)
+ try self.validate(self.clientId, name: "clientId", parent: name, pattern: ".*")
+ try self.validate(self.clientToken, name: "clientToken", parent: name, max: 2048)
+ try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1)
+ try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: ".*")
+ try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, max: 255)
+ try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, min: 1)
+ try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, pattern: "\\S")
+ try self.validate(self.destinationURI, name: "destinationURI", parent: name, max: 1000)
+ try self.validate(self.destinationURI, name: "destinationURI", parent: name, min: 1)
+ try self.validate(self.destinationURI, name: "destinationURI", parent: name, pattern: "^(\\w+\\:\\/\\/[\\w.-]+[\\w/!@#+=.-]+$)|(\\w+\\:\\/\\/[\\w.-]+[\\w/!@#+=.-]+[\\w/!@#+=.-]+[\\w/!@#+=.,-]+$)$")
+ try self.executionConfiguration?.validate(name: "\(name).executionConfiguration")
+ try self.objectConfiguration?.forEach {
+ try validate($0.key, name: "objectConfiguration.key", parent: name, max: 255)
+ try validate($0.key, name: "objectConfiguration.key", parent: name, min: 1)
+ try validate($0.key, name: "objectConfiguration.key", parent: name, pattern: "\\S")
+ }
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case clientAssociationMetadata = "ClientAssociationMetadata"
+ case clientId = "ClientId"
+ case clientToken = "ClientToken"
+ case destinationURI = "DestinationURI"
+ case executionConfiguration = "ExecutionConfiguration"
+ case objectConfiguration = "ObjectConfiguration"
+ }
+ }
+
+ public struct CreateDataIntegrationAssociationResponse: AWSDecodableShape {
+ /// The Amazon Resource Name (ARN) for the DataIntegration.
+ public let dataIntegrationArn: String?
+ /// A unique identifier for the DataIntegrationAssociation.
+ public let dataIntegrationAssociationId: String?
+
+ public init(dataIntegrationArn: String? = nil, dataIntegrationAssociationId: String? = nil) {
+ self.dataIntegrationArn = dataIntegrationArn
+ self.dataIntegrationAssociationId = dataIntegrationAssociationId
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case dataIntegrationArn = "DataIntegrationArn"
+ case dataIntegrationAssociationId = "DataIntegrationAssociationId"
+ }
+ }
+
public struct CreateDataIntegrationRequest: AWSEncodableShape {
/// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs.
public let clientToken: String?
@@ -218,7 +324,7 @@ extension AppIntegrations {
public let description: String?
/// The configuration for what files should be pulled from the source.
public let fileConfiguration: FileConfiguration?
- /// The KMS key for the DataIntegration.
+ /// The KMS key ARN for the DataIntegration.
public let kmsKey: String
/// The name of the DataIntegration.
public let name: String
@@ -227,11 +333,11 @@ extension AppIntegrations {
/// The name of the data and how often it should be pulled from the source.
public let scheduleConfig: ScheduleConfiguration?
/// The URI of the data source.
- public let sourceURI: String
+ public let sourceURI: String?
/// The tags used to organize, track, or control access for this resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }.
public let tags: [String: String]?
- public init(clientToken: String? = CreateDataIntegrationRequest.idempotencyToken(), description: String? = nil, fileConfiguration: FileConfiguration? = nil, kmsKey: String, name: String, objectConfiguration: [String: [String: [String]]]? = nil, scheduleConfig: ScheduleConfiguration? = nil, sourceURI: String, tags: [String: String]? = nil) {
+ public init(clientToken: String? = CreateDataIntegrationRequest.idempotencyToken(), description: String? = nil, fileConfiguration: FileConfiguration? = nil, kmsKey: String, name: String, objectConfiguration: [String: [String: [String]]]? = nil, scheduleConfig: ScheduleConfiguration? = nil, sourceURI: String? = nil, tags: [String: String]? = nil) {
self.clientToken = clientToken
self.description = description
self.fileConfiguration = fileConfiguration
@@ -299,7 +405,7 @@ extension AppIntegrations {
public let fileConfiguration: FileConfiguration?
/// A unique identifier.
public let id: String?
- /// The KMS key for the DataIntegration.
+ /// The KMS key ARN for the DataIntegration.
public let kmsKey: String?
/// The name of the DataIntegration.
public let name: String?
@@ -417,17 +523,28 @@ extension AppIntegrations {
public let dataIntegrationArn: String?
/// The Amazon Resource Name (ARN) of the DataIntegration association.
public let dataIntegrationAssociationArn: String?
+ /// The URI of the data destination.
+ public let destinationURI: String?
+ public let executionConfiguration: ExecutionConfiguration?
+ /// The execution status of the last job.
+ public let lastExecutionStatus: LastExecutionStatus?
- public init(clientId: String? = nil, dataIntegrationArn: String? = nil, dataIntegrationAssociationArn: String? = nil) {
+ public init(clientId: String? = nil, dataIntegrationArn: String? = nil, dataIntegrationAssociationArn: String? = nil, destinationURI: String? = nil, executionConfiguration: ExecutionConfiguration? = nil, lastExecutionStatus: LastExecutionStatus? = nil) {
self.clientId = clientId
self.dataIntegrationArn = dataIntegrationArn
self.dataIntegrationAssociationArn = dataIntegrationAssociationArn
+ self.destinationURI = destinationURI
+ self.executionConfiguration = executionConfiguration
+ self.lastExecutionStatus = lastExecutionStatus
}
private enum CodingKeys: String, CodingKey {
case clientId = "ClientId"
case dataIntegrationArn = "DataIntegrationArn"
case dataIntegrationAssociationArn = "DataIntegrationAssociationArn"
+ case destinationURI = "DestinationURI"
+ case executionConfiguration = "ExecutionConfiguration"
+ case lastExecutionStatus = "LastExecutionStatus"
}
}
@@ -618,6 +735,30 @@ extension AppIntegrations {
}
}
+ public struct ExecutionConfiguration: AWSEncodableShape & AWSDecodableShape {
+ /// The mode for data import/export execution.
+ public let executionMode: ExecutionMode
+ public let onDemandConfiguration: OnDemandConfiguration?
+ public let scheduleConfiguration: ScheduleConfiguration?
+
+ public init(executionMode: ExecutionMode, onDemandConfiguration: OnDemandConfiguration? = nil, scheduleConfiguration: ScheduleConfiguration? = nil) {
+ self.executionMode = executionMode
+ self.onDemandConfiguration = onDemandConfiguration
+ self.scheduleConfiguration = scheduleConfiguration
+ }
+
+ public func validate(name: String) throws {
+ try self.onDemandConfiguration?.validate(name: "\(name).onDemandConfiguration")
+ try self.scheduleConfiguration?.validate(name: "\(name).scheduleConfiguration")
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case executionMode = "ExecutionMode"
+ case onDemandConfiguration = "OnDemandConfiguration"
+ case scheduleConfiguration = "ScheduleConfiguration"
+ }
+ }
+
public struct ExternalUrlConfig: AWSEncodableShape & AWSDecodableShape {
/// The URL to access the application.
public let accessUrl: String
@@ -804,13 +945,13 @@ extension AppIntegrations {
public struct GetDataIntegrationResponse: AWSDecodableShape {
/// The Amazon Resource Name (ARN) for the DataIntegration.
public let arn: String?
- /// The KMS key for the DataIntegration.
+        /// A description of the DataIntegration.
public let description: String?
/// The configuration for what files should be pulled from the source.
public let fileConfiguration: FileConfiguration?
/// A unique identifier.
public let id: String?
- /// The KMS key for the DataIntegration.
+ /// The KMS key ARN for the DataIntegration.
public let kmsKey: String?
/// The name of the DataIntegration.
public let name: String?
@@ -906,6 +1047,23 @@ extension AppIntegrations {
}
}
+ public struct LastExecutionStatus: AWSDecodableShape {
+ /// The job status enum string.
+ public let executionStatus: ExecutionStatus?
+ /// The status message of a job.
+ public let statusMessage: String?
+
+ public init(executionStatus: ExecutionStatus? = nil, statusMessage: String? = nil) {
+ self.executionStatus = executionStatus
+ self.statusMessage = statusMessage
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case executionStatus = "ExecutionStatus"
+ case statusMessage = "StatusMessage"
+ }
+ }
+
public struct ListApplicationAssociationsRequest: AWSEncodableShape {
/// A unique identifier for the Application.
public let applicationId: String
@@ -1245,6 +1403,32 @@ extension AppIntegrations {
}
}
+ public struct OnDemandConfiguration: AWSEncodableShape & AWSDecodableShape {
+        /// The end time for data pull from the source as a Unix/epoch string in milliseconds.
+ public let endTime: String?
+        /// The start time for data pull from the source as a Unix/epoch string in milliseconds.
+ public let startTime: String
+
+ public init(endTime: String? = nil, startTime: String) {
+ self.endTime = endTime
+ self.startTime = startTime
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.endTime, name: "endTime", parent: name, max: 255)
+ try self.validate(self.endTime, name: "endTime", parent: name, min: 1)
+ try self.validate(self.endTime, name: "endTime", parent: name, pattern: "\\S")
+ try self.validate(self.startTime, name: "startTime", parent: name, max: 255)
+ try self.validate(self.startTime, name: "startTime", parent: name, min: 1)
+ try self.validate(self.startTime, name: "startTime", parent: name, pattern: "\\S")
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case endTime = "EndTime"
+ case startTime = "StartTime"
+ }
+ }
+
public struct Publication: AWSEncodableShape & AWSDecodableShape {
/// The description of the publication.
public let description: String?
@@ -1503,6 +1687,47 @@ extension AppIntegrations {
public init() {}
}
+ public struct UpdateDataIntegrationAssociationRequest: AWSEncodableShape {
+        /// A unique identifier of the DataIntegrationAssociation resource.
+ public let dataIntegrationAssociationIdentifier: String
+ /// A unique identifier for the DataIntegration.
+ public let dataIntegrationIdentifier: String
+ /// The configuration for how the files should be pulled from the source.
+ public let executionConfiguration: ExecutionConfiguration
+
+ public init(dataIntegrationAssociationIdentifier: String, dataIntegrationIdentifier: String, executionConfiguration: ExecutionConfiguration) {
+ self.dataIntegrationAssociationIdentifier = dataIntegrationAssociationIdentifier
+ self.dataIntegrationIdentifier = dataIntegrationIdentifier
+ self.executionConfiguration = executionConfiguration
+ }
+
+ public func encode(to encoder: Encoder) throws {
+ let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
+ var container = encoder.container(keyedBy: CodingKeys.self)
+ request.encodePath(self.dataIntegrationAssociationIdentifier, key: "DataIntegrationAssociationIdentifier")
+ request.encodePath(self.dataIntegrationIdentifier, key: "DataIntegrationIdentifier")
+ try container.encode(self.executionConfiguration, forKey: .executionConfiguration)
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.dataIntegrationAssociationIdentifier, name: "dataIntegrationAssociationIdentifier", parent: name, max: 255)
+ try self.validate(self.dataIntegrationAssociationIdentifier, name: "dataIntegrationAssociationIdentifier", parent: name, min: 1)
+ try self.validate(self.dataIntegrationAssociationIdentifier, name: "dataIntegrationAssociationIdentifier", parent: name, pattern: "\\S")
+ try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, max: 255)
+ try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, min: 1)
+ try self.validate(self.dataIntegrationIdentifier, name: "dataIntegrationIdentifier", parent: name, pattern: "\\S")
+ try self.executionConfiguration.validate(name: "\(name).executionConfiguration")
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case executionConfiguration = "ExecutionConfiguration"
+ }
+ }
+
+ public struct UpdateDataIntegrationAssociationResponse: AWSDecodableShape {
+ public init() {}
+ }
+
public struct UpdateDataIntegrationRequest: AWSEncodableShape {
/// A description of the DataIntegration.
public let description: String?
diff --git a/Sources/Soto/Services/AppStream/AppStream_api.swift b/Sources/Soto/Services/AppStream/AppStream_api.swift
index 0ea8c3a1e0..fd10d03d33 100644
--- a/Sources/Soto/Services/AppStream/AppStream_api.swift
+++ b/Sources/Soto/Services/AppStream/AppStream_api.swift
@@ -319,6 +319,19 @@ public struct AppStream: AWSService {
)
}
+ /// Creates custom branding that customizes the appearance of the streaming application catalog page.
+ @Sendable
+ public func createThemeForStack(_ input: CreateThemeForStackRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateThemeForStackResult {
+ return try await self.client.execute(
+ operation: "CreateThemeForStack",
+ path: "/",
+ httpMethod: .POST,
+ serviceConfig: self.config,
+ input: input,
+ logger: logger
+ )
+ }
+
/// Creates a new image with the latest Windows operating system updates, driver updates, and AppStream 2.0 agent software. For more information, see the "Update an Image by Using Managed AppStream 2.0 Image Updates" section in Administer Your AppStream 2.0 Images, in the Amazon AppStream 2.0 Administration Guide.
@Sendable
public func createUpdatedImage(_ input: CreateUpdatedImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateUpdatedImageResult {
@@ -488,6 +501,19 @@ public struct AppStream: AWSService {
)
}
+ /// Deletes custom branding that customizes the appearance of the streaming application catalog page.
+ @Sendable
+ public func deleteThemeForStack(_ input: DeleteThemeForStackRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteThemeForStackResult {
+ return try await self.client.execute(
+ operation: "DeleteThemeForStack",
+ path: "/",
+ httpMethod: .POST,
+ serviceConfig: self.config,
+ input: input,
+ logger: logger
+ )
+ }
+
/// Disables usage report generation.
@Sendable
public func deleteUsageReportSubscription(_ input: DeleteUsageReportSubscriptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteUsageReportSubscriptionResult {
@@ -683,6 +709,19 @@ public struct AppStream: AWSService {
)
}
+ /// Retrieves a list that describes the theme for a specified stack. A theme is custom branding that customizes the appearance of the streaming application catalog page.
+ @Sendable
+ public func describeThemeForStack(_ input: DescribeThemeForStackRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeThemeForStackResult {
+ return try await self.client.execute(
+ operation: "DescribeThemeForStack",
+ path: "/",
+ httpMethod: .POST,
+ serviceConfig: self.config,
+ input: input,
+ logger: logger
+ )
+ }
+
/// Retrieves a list that describes one or more usage report subscriptions.
@Sendable
public func describeUsageReportSubscriptions(_ input: DescribeUsageReportSubscriptionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeUsageReportSubscriptionsResult {
@@ -1059,6 +1098,19 @@ public struct AppStream: AWSService {
logger: logger
)
}
+
+ /// Updates custom branding that customizes the appearance of the streaming application catalog page.
+ @Sendable
+ public func updateThemeForStack(_ input: UpdateThemeForStackRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateThemeForStackResult {
+ return try await self.client.execute(
+ operation: "UpdateThemeForStack",
+ path: "/",
+ httpMethod: .POST,
+ serviceConfig: self.config,
+ input: input,
+ logger: logger
+ )
+ }
}
extension AppStream {
diff --git a/Sources/Soto/Services/AppStream/AppStream_shapes.swift b/Sources/Soto/Services/AppStream/AppStream_shapes.swift
index f4616eb829..92bf2460ed 100644
--- a/Sources/Soto/Services/AppStream/AppStream_shapes.swift
+++ b/Sources/Soto/Services/AppStream/AppStream_shapes.swift
@@ -100,6 +100,12 @@ extension AppStream {
public var description: String { return self.rawValue }
}
+ public enum DynamicAppProvidersEnabled: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+ case disabled = "DISABLED"
+ case enabled = "ENABLED"
+ public var description: String { return self.rawValue }
+ }
+
public enum FleetAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
case domainJoinInfo = "DOMAIN_JOIN_INFO"
case iamRoleArn = "IAM_ROLE_ARN"
@@ -181,6 +187,12 @@ extension AppStream {
public var description: String { return self.rawValue }
}
+ public enum ImageSharedWithOthers: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+ case `false` = "FALSE"
+ case `true` = "TRUE"
+ public var description: String { return self.rawValue }
+ }
+
public enum ImageState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
case available = "AVAILABLE"
case copying = "COPYING"
@@ -199,6 +211,12 @@ extension AppStream {
public var description: String { return self.rawValue }
}
+ public enum LatestAppstreamAgentVersion: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+ case `false` = "FALSE"
+ case `true` = "TRUE"
+ public var description: String { return self.rawValue }
+ }
+
public enum MessageAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
case resend = "RESEND"
case suppress = "SUPPRESS"
@@ -219,6 +237,7 @@ extension AppStream {
public enum PlatformType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
case amazonLinux2 = "AMAZON_LINUX2"
+ case rhel8 = "RHEL8"
case windows = "WINDOWS"
case windowsServer2016 = "WINDOWS_SERVER_2016"
case windowsServer2019 = "WINDOWS_SERVER_2019"
@@ -280,6 +299,25 @@ extension AppStream {
public var description: String { return self.rawValue }
}
+ public enum ThemeAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+ case footerLinks = "FOOTER_LINKS"
+ public var description: String { return self.rawValue }
+ }
+
+ public enum ThemeState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+ case disabled = "DISABLED"
+ case enabled = "ENABLED"
+ public var description: String { return self.rawValue }
+ }
+
+ public enum ThemeStyling: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+ case blue = "BLUE"
+ case lightBlue = "LIGHT_BLUE"
+ case pink = "PINK"
+ case red = "RED"
+ public var description: String { return self.rawValue }
+ }
+
public enum UsageReportExecutionErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
case accessDenied = "ACCESS_DENIED"
case internalServiceError = "INTERNAL_SERVICE_ERROR"
@@ -1316,7 +1354,7 @@ extension AppStream {
public let computeCapacity: ComputeCapacity?
/// The description to display.
public let description: String?
- /// The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 360000.
+ /// The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 36000.
public let disconnectTimeoutInSeconds: Int?
/// The fleet name to display.
public let displayName: String?
@@ -1328,13 +1366,13 @@ extension AppStream {
public let fleetType: FleetType?
/// The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To assume a role, a fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and creates the appstream_machine_role credential profile on the instance. For more information, see Using an IAM Role to Grant Permissions to Applications and Scripts Running on AppStream 2.0 Streaming Instances in the Amazon AppStream 2.0 Administration Guide.
public let iamRoleArn: String?
- /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.
+ /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.
public let idleDisconnectTimeoutInSeconds: Int?
/// The ARN of the public, private, or shared image to use.
public let imageArn: String?
/// The name of the image used to create the fleet.
public let imageName: String?
- /// The instance type to use when launching fleet instances. The following instance types are available: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge stream.compute.large stream.compute.xlarge stream.compute.2xlarge stream.compute.4xlarge stream.compute.8xlarge stream.memory.large stream.memory.xlarge stream.memory.2xlarge stream.memory.4xlarge stream.memory.8xlarge stream.memory.z1d.large stream.memory.z1d.xlarge stream.memory.z1d.2xlarge stream.memory.z1d.3xlarge stream.memory.z1d.6xlarge stream.memory.z1d.12xlarge stream.graphics-design.large stream.graphics-design.xlarge stream.graphics-design.2xlarge stream.graphics-design.4xlarge stream.graphics-desktop.2xlarge stream.graphics.g4dn.xlarge stream.graphics.g4dn.2xlarge stream.graphics.g4dn.4xlarge stream.graphics.g4dn.8xlarge stream.graphics.g4dn.12xlarge stream.graphics.g4dn.16xlarge stream.graphics-pro.4xlarge stream.graphics-pro.8xlarge stream.graphics-pro.16xlarge The following instance types are available for Elastic fleets: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge
+ /// The instance type to use when launching fleet instances. The following instance types are available: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge stream.compute.large stream.compute.xlarge stream.compute.2xlarge stream.compute.4xlarge stream.compute.8xlarge stream.memory.large stream.memory.xlarge stream.memory.2xlarge stream.memory.4xlarge stream.memory.8xlarge stream.memory.z1d.large stream.memory.z1d.xlarge stream.memory.z1d.2xlarge stream.memory.z1d.3xlarge stream.memory.z1d.6xlarge stream.memory.z1d.12xlarge stream.graphics-design.large stream.graphics-design.xlarge stream.graphics-design.2xlarge stream.graphics-design.4xlarge stream.graphics-desktop.2xlarge stream.graphics.g4dn.xlarge stream.graphics.g4dn.2xlarge stream.graphics.g4dn.4xlarge stream.graphics.g4dn.8xlarge stream.graphics.g4dn.12xlarge stream.graphics.g4dn.16xlarge stream.graphics.g5.xlarge stream.graphics.g5.2xlarge stream.graphics.g5.4xlarge stream.graphics.g5.8xlarge stream.graphics.g5.12xlarge stream.graphics.g5.16xlarge stream.graphics.g5.24xlarge stream.graphics-pro.4xlarge stream.graphics-pro.8xlarge stream.graphics-pro.16xlarge The following instance types are available for Elastic fleets: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge
public let instanceType: String?
/// The maximum concurrent sessions of the Elastic fleet. This is required for Elastic fleets, and not allowed for other fleet types.
public let maxConcurrentSessions: Int?
@@ -1750,6 +1788,64 @@ extension AppStream {
}
}
+ public struct CreateThemeForStackRequest: AWSEncodableShape {
+ /// The S3 location of the favicon. The favicon enables users to recognize their application streaming site in a browser full of tabs or bookmarks. It is displayed at the top of the browser tab for the application streaming site during users' streaming sessions.
+ public let faviconS3Location: S3Location?
+ /// The links that are displayed in the footer of the streaming application catalog page. These links are helpful resources for users, such as the organization's IT support and product marketing sites.
+ public let footerLinks: [ThemeFooterLink]?
+ /// The organization logo that appears on the streaming application catalog page.
+ public let organizationLogoS3Location: S3Location?
+ /// The name of the stack for the theme.
+ public let stackName: String?
+ /// The color theme that is applied to website links, text, and buttons. These colors are also applied as accents in the background for the streaming application catalog page.
+ public let themeStyling: ThemeStyling?
+ /// The title that is displayed at the top of the browser tab during users' application streaming sessions.
+ public let titleText: String?
+
+ public init(faviconS3Location: S3Location? = nil, footerLinks: [ThemeFooterLink]? = nil, organizationLogoS3Location: S3Location? = nil, stackName: String? = nil, themeStyling: ThemeStyling? = nil, titleText: String? = nil) {
+ self.faviconS3Location = faviconS3Location
+ self.footerLinks = footerLinks
+ self.organizationLogoS3Location = organizationLogoS3Location
+ self.stackName = stackName
+ self.themeStyling = themeStyling
+ self.titleText = titleText
+ }
+
+ public func validate(name: String) throws {
+ try self.faviconS3Location?.validate(name: "\(name).faviconS3Location")
+ try self.footerLinks?.forEach {
+ try $0.validate(name: "\(name).footerLinks[]")
+ }
+ try self.organizationLogoS3Location?.validate(name: "\(name).organizationLogoS3Location")
+ try self.validate(self.stackName, name: "stackName", parent: name, pattern: "^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,100}$")
+ try self.validate(self.titleText, name: "titleText", parent: name, max: 300)
+ try self.validate(self.titleText, name: "titleText", parent: name, min: 1)
+ try self.validate(self.titleText, name: "titleText", parent: name, pattern: "^[-@./#&+\\w\\s]*$")
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case faviconS3Location = "FaviconS3Location"
+ case footerLinks = "FooterLinks"
+ case organizationLogoS3Location = "OrganizationLogoS3Location"
+ case stackName = "StackName"
+ case themeStyling = "ThemeStyling"
+ case titleText = "TitleText"
+ }
+ }
+
+ public struct CreateThemeForStackResult: AWSDecodableShape {
+ /// The theme object that contains the metadata of the custom branding.
+ public let theme: Theme?
+
+ public init(theme: Theme? = nil) {
+ self.theme = theme
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case theme = "Theme"
+ }
+ }
+
public struct CreateUpdatedImageRequest: AWSEncodableShape {
/// Indicates whether to display the status of image update availability before AppStream 2.0 initiates the process of creating a new updated image. If this value is set to true, AppStream 2.0 displays whether image updates are available. If this value is set to false, AppStream 2.0 initiates the process of creating a new updated image without displaying whether image updates are available.
public let dryRun: Bool?
@@ -2113,6 +2209,27 @@ extension AppStream {
public init() {}
}
+ public struct DeleteThemeForStackRequest: AWSEncodableShape {
+ /// The name of the stack for the theme.
+ public let stackName: String?
+
+ public init(stackName: String? = nil) {
+ self.stackName = stackName
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.stackName, name: "stackName", parent: name, pattern: "^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,100}$")
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case stackName = "StackName"
+ }
+ }
+
+ public struct DeleteThemeForStackResult: AWSDecodableShape {
+ public init() {}
+ }
+
public struct DeleteUsageReportSubscriptionRequest: AWSEncodableShape {
public init() {}
}
@@ -2775,6 +2892,36 @@ extension AppStream {
}
}
+ public struct DescribeThemeForStackRequest: AWSEncodableShape {
+ /// The name of the stack for the theme.
+ public let stackName: String?
+
+ public init(stackName: String? = nil) {
+ self.stackName = stackName
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.stackName, name: "stackName", parent: name, pattern: "^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,100}$")
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case stackName = "StackName"
+ }
+ }
+
+ public struct DescribeThemeForStackResult: AWSDecodableShape {
+ /// The theme object that contains the metadata of the custom branding.
+ public let theme: Theme?
+
+ public init(theme: Theme? = nil) {
+ self.theme = theme
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case theme = "Theme"
+ }
+ }
+
public struct DescribeUsageReportSubscriptionsRequest: AWSEncodableShape {
/// The maximum size of each page of results.
public let maxResults: Int?
@@ -3243,7 +3390,7 @@ extension AppStream {
public let createdTime: Date?
/// The description to display.
public let description: String?
- /// The amount of time that a streaming session remains active after users disconnect. If they try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 360000.
+ /// The amount of time that a streaming session remains active after users disconnect. If they try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 36000.
public let disconnectTimeoutInSeconds: Int?
/// The fleet name to display.
public let displayName: String?
@@ -3257,7 +3404,7 @@ extension AppStream {
public let fleetType: FleetType?
/// The ARN of the IAM role that is applied to the fleet. To assume a role, the fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and creates the appstream_machine_role credential profile on the instance. For more information, see Using an IAM Role to Grant Permissions to Applications and Scripts Running on AppStream 2.0 Streaming Instances in the Amazon AppStream 2.0 Administration Guide.
public let iamRoleArn: String?
- /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.
+ /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.
public let idleDisconnectTimeoutInSeconds: Int?
/// The ARN for the public, private, or shared image.
public let imageArn: String?
@@ -3375,6 +3522,8 @@ extension AppStream {
public let description: String?
/// The image name to display.
public let displayName: String?
+ /// Indicates whether dynamic app providers are enabled within an AppStream 2.0 image or not.
+ public let dynamicAppProvidersEnabled: DynamicAppProvidersEnabled?
/// The name of the image builder that was used to create the private image. If the image is shared, this value is null.
public let imageBuilderName: String?
/// Indicates whether an image builder can be launched from this image.
@@ -3383,6 +3532,10 @@ extension AppStream {
public let imageErrors: [ResourceError]?
/// The permissions to provide to the destination AWS account for the specified image.
public let imagePermissions: ImagePermissions?
+ /// Indicates whether the image is shared with another account ID.
+ public let imageSharedWithOthers: ImageSharedWithOthers?
+ /// Indicates whether the image is using the latest AppStream 2.0 agent version or not.
+ public let latestAppstreamAgentVersion: LatestAppstreamAgentVersion?
/// The name of the image.
public let name: String?
/// The operating system platform of the image.
@@ -3393,10 +3546,12 @@ extension AppStream {
public let state: ImageState?
/// The reason why the last state change occurred.
public let stateChangeReason: ImageStateChangeReason?
+        /// The supported instance families that determine which image a customer can use when the customer launches a fleet or image builder. The following instance families are supported: General Purpose Compute Optimized Memory Optimized Graphics Graphics Design Graphics Pro Graphics G4 Graphics G5
+ public let supportedInstanceFamilies: [String]?
/// Indicates whether the image is public or private.
public let visibility: VisibilityType?
- public init(applications: [Application]? = nil, appstreamAgentVersion: String? = nil, arn: String? = nil, baseImageArn: String? = nil, createdTime: Date? = nil, description: String? = nil, displayName: String? = nil, imageBuilderName: String? = nil, imageBuilderSupported: Bool? = nil, imageErrors: [ResourceError]? = nil, imagePermissions: ImagePermissions? = nil, name: String? = nil, platform: PlatformType? = nil, publicBaseImageReleasedDate: Date? = nil, state: ImageState? = nil, stateChangeReason: ImageStateChangeReason? = nil, visibility: VisibilityType? = nil) {
+ public init(applications: [Application]? = nil, appstreamAgentVersion: String? = nil, arn: String? = nil, baseImageArn: String? = nil, createdTime: Date? = nil, description: String? = nil, displayName: String? = nil, dynamicAppProvidersEnabled: DynamicAppProvidersEnabled? = nil, imageBuilderName: String? = nil, imageBuilderSupported: Bool? = nil, imageErrors: [ResourceError]? = nil, imagePermissions: ImagePermissions? = nil, imageSharedWithOthers: ImageSharedWithOthers? = nil, latestAppstreamAgentVersion: LatestAppstreamAgentVersion? = nil, name: String? = nil, platform: PlatformType? = nil, publicBaseImageReleasedDate: Date? = nil, state: ImageState? = nil, stateChangeReason: ImageStateChangeReason? = nil, supportedInstanceFamilies: [String]? = nil, visibility: VisibilityType? = nil) {
self.applications = applications
self.appstreamAgentVersion = appstreamAgentVersion
self.arn = arn
@@ -3404,15 +3559,19 @@ extension AppStream {
self.createdTime = createdTime
self.description = description
self.displayName = displayName
+ self.dynamicAppProvidersEnabled = dynamicAppProvidersEnabled
self.imageBuilderName = imageBuilderName
self.imageBuilderSupported = imageBuilderSupported
self.imageErrors = imageErrors
self.imagePermissions = imagePermissions
+ self.imageSharedWithOthers = imageSharedWithOthers
+ self.latestAppstreamAgentVersion = latestAppstreamAgentVersion
self.name = name
self.platform = platform
self.publicBaseImageReleasedDate = publicBaseImageReleasedDate
self.state = state
self.stateChangeReason = stateChangeReason
+ self.supportedInstanceFamilies = supportedInstanceFamilies
self.visibility = visibility
}
@@ -3424,15 +3583,19 @@ extension AppStream {
case createdTime = "CreatedTime"
case description = "Description"
case displayName = "DisplayName"
+ case dynamicAppProvidersEnabled = "DynamicAppProvidersEnabled"
case imageBuilderName = "ImageBuilderName"
case imageBuilderSupported = "ImageBuilderSupported"
case imageErrors = "ImageErrors"
case imagePermissions = "ImagePermissions"
+ case imageSharedWithOthers = "ImageSharedWithOthers"
+ case latestAppstreamAgentVersion = "LatestAppstreamAgentVersion"
case name = "Name"
case platform = "Platform"
case publicBaseImageReleasedDate = "PublicBaseImageReleasedDate"
case state = "State"
case stateChangeReason = "StateChangeReason"
+ case supportedInstanceFamilies = "SupportedInstanceFamilies"
case visibility = "Visibility"
}
}
@@ -3462,6 +3625,8 @@ extension AppStream {
public let imageBuilderErrors: [ResourceError]?
/// The instance type for the image builder. The following instance types are available: stream.standard.small stream.standard.medium stream.standard.large stream.compute.large stream.compute.xlarge stream.compute.2xlarge stream.compute.4xlarge stream.compute.8xlarge stream.memory.large stream.memory.xlarge stream.memory.2xlarge stream.memory.4xlarge stream.memory.8xlarge stream.memory.z1d.large stream.memory.z1d.xlarge stream.memory.z1d.2xlarge stream.memory.z1d.3xlarge stream.memory.z1d.6xlarge stream.memory.z1d.12xlarge stream.graphics-design.large stream.graphics-design.xlarge stream.graphics-design.2xlarge stream.graphics-design.4xlarge stream.graphics-desktop.2xlarge stream.graphics.g4dn.xlarge stream.graphics.g4dn.2xlarge stream.graphics.g4dn.4xlarge stream.graphics.g4dn.8xlarge stream.graphics.g4dn.12xlarge stream.graphics.g4dn.16xlarge stream.graphics-pro.4xlarge stream.graphics-pro.8xlarge stream.graphics-pro.16xlarge
public let instanceType: String?
+ /// Indicates whether the image builder is using the latest AppStream 2.0 agent version or not.
+ public let latestAppstreamAgentVersion: LatestAppstreamAgentVersion?
/// The name of the image builder.
public let name: String?
public let networkAccessConfiguration: NetworkAccessConfiguration?
@@ -3474,7 +3639,7 @@ extension AppStream {
/// The VPC configuration of the image builder.
public let vpcConfig: VpcConfig?
- public init(accessEndpoints: [AccessEndpoint]? = nil, appstreamAgentVersion: String? = nil, arn: String? = nil, createdTime: Date? = nil, description: String? = nil, displayName: String? = nil, domainJoinInfo: DomainJoinInfo? = nil, enableDefaultInternetAccess: Bool? = nil, iamRoleArn: String? = nil, imageArn: String? = nil, imageBuilderErrors: [ResourceError]? = nil, instanceType: String? = nil, name: String? = nil, networkAccessConfiguration: NetworkAccessConfiguration? = nil, platform: PlatformType? = nil, state: ImageBuilderState? = nil, stateChangeReason: ImageBuilderStateChangeReason? = nil, vpcConfig: VpcConfig? = nil) {
+ public init(accessEndpoints: [AccessEndpoint]? = nil, appstreamAgentVersion: String? = nil, arn: String? = nil, createdTime: Date? = nil, description: String? = nil, displayName: String? = nil, domainJoinInfo: DomainJoinInfo? = nil, enableDefaultInternetAccess: Bool? = nil, iamRoleArn: String? = nil, imageArn: String? = nil, imageBuilderErrors: [ResourceError]? = nil, instanceType: String? = nil, latestAppstreamAgentVersion: LatestAppstreamAgentVersion? = nil, name: String? = nil, networkAccessConfiguration: NetworkAccessConfiguration? = nil, platform: PlatformType? = nil, state: ImageBuilderState? = nil, stateChangeReason: ImageBuilderStateChangeReason? = nil, vpcConfig: VpcConfig? = nil) {
self.accessEndpoints = accessEndpoints
self.appstreamAgentVersion = appstreamAgentVersion
self.arn = arn
@@ -3487,6 +3652,7 @@ extension AppStream {
self.imageArn = imageArn
self.imageBuilderErrors = imageBuilderErrors
self.instanceType = instanceType
+ self.latestAppstreamAgentVersion = latestAppstreamAgentVersion
self.name = name
self.networkAccessConfiguration = networkAccessConfiguration
self.platform = platform
@@ -3508,6 +3674,7 @@ extension AppStream {
case imageArn = "ImageArn"
case imageBuilderErrors = "ImageBuilderErrors"
case instanceType = "InstanceType"
+ case latestAppstreamAgentVersion = "LatestAppstreamAgentVersion"
case name = "Name"
case networkAccessConfiguration = "NetworkAccessConfiguration"
case platform = "Platform"
@@ -4254,6 +4421,72 @@ extension AppStream {
public init() {}
}
+ public struct Theme: AWSDecodableShape {
+ /// The time the theme was created.
+ public let createdTime: Date?
+ /// The stack that has the custom branding theme.
+ public let stackName: String?
+ /// The state of the theme.
+ public let state: ThemeState?
+ /// The URL of the icon that displays at the top of a user's browser tab during streaming sessions.
+ public let themeFaviconURL: String?
+ /// The website links that display in the catalog page footer.
+ public let themeFooterLinks: [ThemeFooterLink]?
+ /// The URL of the logo that displays in the catalog page header.
+ public let themeOrganizationLogoURL: String?
+ /// The color that is used for the website links, text, buttons, and catalog page background.
+ public let themeStyling: ThemeStyling?
+ /// The browser tab page title.
+ public let themeTitleText: String?
+
+ public init(createdTime: Date? = nil, stackName: String? = nil, state: ThemeState? = nil, themeFaviconURL: String? = nil, themeFooterLinks: [ThemeFooterLink]? = nil, themeOrganizationLogoURL: String? = nil, themeStyling: ThemeStyling? = nil, themeTitleText: String? = nil) {
+ self.createdTime = createdTime
+ self.stackName = stackName
+ self.state = state
+ self.themeFaviconURL = themeFaviconURL
+ self.themeFooterLinks = themeFooterLinks
+ self.themeOrganizationLogoURL = themeOrganizationLogoURL
+ self.themeStyling = themeStyling
+ self.themeTitleText = themeTitleText
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case createdTime = "CreatedTime"
+ case stackName = "StackName"
+ case state = "State"
+ case themeFaviconURL = "ThemeFaviconURL"
+ case themeFooterLinks = "ThemeFooterLinks"
+ case themeOrganizationLogoURL = "ThemeOrganizationLogoURL"
+ case themeStyling = "ThemeStyling"
+ case themeTitleText = "ThemeTitleText"
+ }
+ }
+
+ public struct ThemeFooterLink: AWSEncodableShape & AWSDecodableShape {
+ /// The name of the websites that display in the catalog page footer.
+ public let displayName: String?
+ /// The URL of the websites that display in the catalog page footer.
+ public let footerLinkURL: String?
+
+ public init(displayName: String? = nil, footerLinkURL: String? = nil) {
+ self.displayName = displayName
+ self.footerLinkURL = footerLinkURL
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.displayName, name: "displayName", parent: name, max: 300)
+ try self.validate(self.displayName, name: "displayName", parent: name, min: 1)
+ try self.validate(self.displayName, name: "displayName", parent: name, pattern: "^[-@./#&+\\w\\s]*$")
+ try self.validate(self.footerLinkURL, name: "footerLinkURL", parent: name, max: 1000)
+ try self.validate(self.footerLinkURL, name: "footerLinkURL", parent: name, min: 1)
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case displayName = "DisplayName"
+ case footerLinkURL = "FooterLinkURL"
+ }
+ }
+
public struct UntagResourceRequest: AWSEncodableShape {
/// The Amazon Resource Name (ARN) of the resource.
public let resourceArn: String?
@@ -4537,7 +4770,7 @@ extension AppStream {
public let deleteVpcConfig: Bool?
/// The description to display.
public let description: String?
- /// The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 360000.
+ /// The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 36000.
public let disconnectTimeoutInSeconds: Int?
/// The fleet name to display.
public let displayName: String?
@@ -4547,7 +4780,7 @@ extension AppStream {
public let enableDefaultInternetAccess: Bool?
/// The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To assume a role, a fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and creates the appstream_machine_role credential profile on the instance. For more information, see Using an IAM Role to Grant Permissions to Applications and Scripts Running on AppStream 2.0 Streaming Instances in the Amazon AppStream 2.0 Administration Guide.
public let iamRoleArn: String?
- /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.
+ /// The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.
public let idleDisconnectTimeoutInSeconds: Int?
/// The ARN of the public, private, or shared image to use.
public let imageArn: String?
@@ -4826,6 +5059,72 @@ extension AppStream {
}
}
+ public struct UpdateThemeForStackRequest: AWSEncodableShape {
+ /// The attributes to delete.
+ public let attributesToDelete: [ThemeAttribute]?
+ /// The S3 location of the favicon. The favicon enables users to recognize their application streaming site in a browser full of tabs or bookmarks. It is displayed at the top of the browser tab for the application streaming site during users' streaming sessions.
+ public let faviconS3Location: S3Location?
+ /// The links that are displayed in the footer of the streaming application catalog page. These links are helpful resources for users, such as the organization's IT support and product marketing sites.
+ public let footerLinks: [ThemeFooterLink]?
+ /// The organization logo that appears on the streaming application catalog page.
+ public let organizationLogoS3Location: S3Location?
+ /// The name of the stack for the theme.
+ public let stackName: String?
+ /// Specifies whether custom branding should be applied to catalog page or not.
+ public let state: ThemeState?
+ /// The color theme that is applied to website links, text, and buttons. These colors are also applied as accents in the background for the streaming application catalog page.
+ public let themeStyling: ThemeStyling?
+ /// The title that is displayed at the top of the browser tab during users' application streaming sessions.
+ public let titleText: String?
+
+ public init(attributesToDelete: [ThemeAttribute]? = nil, faviconS3Location: S3Location? = nil, footerLinks: [ThemeFooterLink]? = nil, organizationLogoS3Location: S3Location? = nil, stackName: String? = nil, state: ThemeState? = nil, themeStyling: ThemeStyling? = nil, titleText: String? = nil) {
+ self.attributesToDelete = attributesToDelete
+ self.faviconS3Location = faviconS3Location
+ self.footerLinks = footerLinks
+ self.organizationLogoS3Location = organizationLogoS3Location
+ self.stackName = stackName
+ self.state = state
+ self.themeStyling = themeStyling
+ self.titleText = titleText
+ }
+
+ public func validate(name: String) throws {
+ try self.faviconS3Location?.validate(name: "\(name).faviconS3Location")
+ try self.footerLinks?.forEach {
+ try $0.validate(name: "\(name).footerLinks[]")
+ }
+ try self.organizationLogoS3Location?.validate(name: "\(name).organizationLogoS3Location")
+ try self.validate(self.stackName, name: "stackName", parent: name, pattern: "^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,100}$")
+ try self.validate(self.titleText, name: "titleText", parent: name, max: 300)
+ try self.validate(self.titleText, name: "titleText", parent: name, min: 1)
+ try self.validate(self.titleText, name: "titleText", parent: name, pattern: "^[-@./#&+\\w\\s]*$")
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case attributesToDelete = "AttributesToDelete"
+ case faviconS3Location = "FaviconS3Location"
+ case footerLinks = "FooterLinks"
+ case organizationLogoS3Location = "OrganizationLogoS3Location"
+ case stackName = "StackName"
+ case state = "State"
+ case themeStyling = "ThemeStyling"
+ case titleText = "TitleText"
+ }
+ }
+
+ public struct UpdateThemeForStackResult: AWSDecodableShape {
+ /// The theme object that contains the metadata of the custom branding.
+ public let theme: Theme?
+
+ public init(theme: Theme? = nil) {
+ self.theme = theme
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case theme = "Theme"
+ }
+ }
+
public struct UsageReportSubscription: AWSDecodableShape {
/// The time when the last usage report was generated.
public let lastGeneratedReportDate: Date?
@@ -4895,7 +5194,7 @@ extension AppStream {
public struct UserSetting: AWSEncodableShape & AWSDecodableShape {
/// The action that is enabled or disabled.
public let action: Action?
- /// Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session. This can be specified only for the CLIPBOARD_COPY_FROM_LOCAL_DEVICE and CLIPBOARD_COPY_TO_LOCAL_DEVICE actions. This defaults to 20,971,520 (20 MB) when unspecified and the permission is ENABLED. This can't be specified when the permission is DISABLED. This can only be specified for AlwaysOn and OnDemand fleets. The attribute is not supported on Elastic fleets. The value can be between 1 and 20,971,520 (20 MB).
+ /// Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session. This can be specified only for the CLIPBOARD_COPY_FROM_LOCAL_DEVICE and CLIPBOARD_COPY_TO_LOCAL_DEVICE actions. This defaults to 20,971,520 (20 MB) when unspecified and the permission is ENABLED. This can't be specified when the permission is DISABLED. The value can be between 1 and 20,971,520 (20 MB).
public let maximumLength: Int?
/// Indicates whether the action is enabled or disabled.
public let permission: Permission?
diff --git a/Sources/Soto/Services/AppSync/AppSync_api.swift b/Sources/Soto/Services/AppSync/AppSync_api.swift
index 514ebe3d0d..d4f9d905a8 100644
--- a/Sources/Soto/Services/AppSync/AppSync_api.swift
+++ b/Sources/Soto/Services/AppSync/AppSync_api.swift
@@ -915,3 +915,303 @@ extension AppSync {
self.config = from.config.with(patch: patch)
}
}
+
+// MARK: Paginators
+
+@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *)
+extension AppSync {
+ /// Lists the API keys for a given API. API keys are deleted automatically 60 days after they expire. However, they may still be included in the response until they have actually been deleted. You can safely call DeleteApiKey to manually delete a key before it's automatically deleted.
+ /// Return PaginatorSequence for operation.
+ ///
+ /// - Parameters:
+ /// - input: Input for request
+ /// - logger: Logger used for logging
+ public func listApiKeysPaginator(
+ _ input: ListApiKeysRequest,
+ logger: Logger = AWSClient.loggingDisabled
+ ) -> AWSClient.PaginatorSequence This is for internal use. The Amplify service uses this parameter to specify the authentication protocol to use\n to access the Git repository for an Amplify app. Amplify specifies The cache configuration for the Amplify app. If you don't specify the\n cache configuration The type of cache configuration to use for an Amplify app. The The Describes the cache configuration for an Amplify app. For more\n information about how Amplify applies an optimal cache configuration for\n your app based on the type of content that is being served, see Managing cache configuration in the Amplify User\n guide. The type of SSL/TLS certificate that you want to use. Specify Specify The type of SSL/TLS certificate that you want to use. Specify Specify The automated branch creation configuration for an Amplify app. The cache configuration for the Amplify app. The personal access token for a GitHub repository for an Amplify app. The personal\n access token is used to authorize access to a GitHub repository using the Amplify GitHub\n App. The token is not stored. Use You must specify either Existing Amplify apps deployed from a GitHub repository using OAuth continue to work\n with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub\n App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the\n Amplify User Guide . The cache configuration for the Amplify app. The Amazon AppIntegrations service enables you to configure and reuse connections to external\n applications. For information about how you can use external applications with Amazon Connect, see\n Set up pre-built\n integrations and Deliver information to agents\n using Amazon Connect Wisdom in the Amazon Connect Administrator\n Guide. The Amazon AppIntegrations service enables you to configure and reuse connections to external\n applications. 
For information about how you can use external applications with Amazon Connect, see\n the following topics in the Amazon Connect Administrator\n Guide: \n Third-party\n applications (3p apps) in the agent workspace\n \n Use\n Amazon Q in Connect for generative AI–powered agent assistance in\n real-time\n This API is in preview release and subject to change. Creates and persists an Application resource. Creates and persists an Application resource. Creates and persists a DataIntegrationAssociation resource. A unique identifier for the DataIntegration. The identifier for the client that is associated with the DataIntegration\n association. The URI of the data destination. The mapping of metadata to be extracted from the data. A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the Amazon Web Services\n SDK populates this field. For more information about idempotency, see\n Making retries safe with idempotent APIs. The configuration for how the files should be pulled from the source. A unique identifier. for the DataIntegrationAssociation. The Amazon Resource Name (ARN) for the DataIntegration. The KMS key for the DataIntegration. The KMS key ARN for the DataIntegration. The URI of the data source. The URI of the data source. The KMS key for the DataIntegration. The KMS key ARN for the DataIntegration. The identifier for the client that is associated with the DataIntegration\n association. The URI of the data destination. The execution status of the last job. Deletes the Application. Only Applications that don't have any Application Associations can be deleted. Deletes the Application. Only Applications that don't have any Application Associations\n can be deleted. The mode for data import/export execution. The configuration for how the files should be pulled from the source. This API is in preview release and subject to change. Get an Application resource. Get an Application resource. 
The KMS key for the DataIntegration. The KMS key ARN for the DataIntegration. The KMS key for the DataIntegration. The KMS key ARN for the DataIntegration. The job status enum string. The status message of a job. The execution status of the last job. This API is in preview release and subject to change. Lists applications in the account. Lists applications in the account. The configuration for what data should be pulled from the source. The start time for data pull from the source as an Unix/epoch string in\n milliseconds The end time for data pull from the source as an Unix/epoch string in\n milliseconds The start and end time for data pull from the source. This API is in preview release and subject to change. Updates and persists an Application resource. Updates and persists an Application resource. Updates and persists a DataIntegrationAssociation resource. \n Updating a DataIntegrationAssociation with ExecutionConfiguration will rerun the on-demand job.\n A unique identifier for the DataIntegration. A unique identifier. of the DataIntegrationAssociation resource The configuration for how the files should be pulled from the source. This is a Preview release of the Application Signals API Reference. Operations and parameters are subject to change before the general availability\n release. Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. \n It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. \n The application-centric view provides you with unified visibility across your applications, services, and \n dependencies, so you can proactively monitor and efficiently triage any issues that may arise, \n ensuring optimal customer experience. Application Signals provides the following benefits: Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors. 
Create and monitor service level objectives (SLOs). See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity. Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. \n It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. \n The application-centric view provides you with unified visibility across your applications, services, and \n dependencies, so you can proactively monitor and efficiently triage any issues that may arise, \n ensuring optimal customer experience. Application Signals provides the following benefits: Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors. Create and monitor service level objectives (SLOs). See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity. Application Signals works with CloudWatch RUM, CloudWatch Synthetics canaries, and Amazon Web Services Service Catalog AppRegistry, to display your client pages, Synthetics canaries, \n and application names within dashboards and maps. The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: Your requested start time will be rounded to the nearest hour. The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The end of the time period to retrieve information about. 
When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: Your requested start time will be rounded to the nearest hour. The start time of the data included in the response. In a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The start time of the data included in the response. In a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour. The end time of the data included in the response. In a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The end time of the data included in the response. In a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour. An array of string-to-string maps that each contain information about one log group associated with this service. Each \n string-to-string map includes the following fields: \n \n \n The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: Your requested start time will be rounded to the nearest hour. The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: Your requested end time will be rounded to the nearest hour. 
The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour. The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour. The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: Your requested start time will be rounded to the nearest hour. The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: Your requested start time will be rounded to the nearest hour. The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The start of the time period that the returned information applies to. 
When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour. The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour. The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: Your requested start time will be rounded to the nearest hour. The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: Your requested end time will be rounded to the nearest hour. The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour. 
The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour. The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: Your requested start time will be rounded to the nearest hour. The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: Your requested start time will be rounded to the nearest hour. The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour. The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: The end of the time period that the returned information applies to. 
When used in a raw HTTP Query API, it is formatted as \n be epoch time in seconds. For example: This displays the time that Application Signals used for the request. It might not match your request exactly, because \n it was rounded to the nearest hour. Cannot find the resource id. Can't find the resource id. An array of structures that each contain information about one metric associated with this service. An array of string-to-string maps that each contain information about one log group associated with this service. Each \n string-to-string map includes the following fields: \n \n \n The instance type to use when launching fleet instances. The following instance types are available: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge stream.compute.large stream.compute.xlarge stream.compute.2xlarge stream.compute.4xlarge stream.compute.8xlarge stream.memory.large stream.memory.xlarge stream.memory.2xlarge stream.memory.4xlarge stream.memory.8xlarge stream.memory.z1d.large stream.memory.z1d.xlarge stream.memory.z1d.2xlarge stream.memory.z1d.3xlarge stream.memory.z1d.6xlarge stream.memory.z1d.12xlarge stream.graphics-design.large stream.graphics-design.xlarge stream.graphics-design.2xlarge stream.graphics-design.4xlarge stream.graphics-desktop.2xlarge stream.graphics.g4dn.xlarge stream.graphics.g4dn.2xlarge stream.graphics.g4dn.4xlarge stream.graphics.g4dn.8xlarge stream.graphics.g4dn.12xlarge stream.graphics.g4dn.16xlarge stream.graphics-pro.4xlarge stream.graphics-pro.8xlarge stream.graphics-pro.16xlarge The following instance types are available for Elastic fleets: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge The instance type to use when launching fleet instances. 
The following instance types are available: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge stream.compute.large stream.compute.xlarge stream.compute.2xlarge stream.compute.4xlarge stream.compute.8xlarge stream.memory.large stream.memory.xlarge stream.memory.2xlarge stream.memory.4xlarge stream.memory.8xlarge stream.memory.z1d.large stream.memory.z1d.xlarge stream.memory.z1d.2xlarge stream.memory.z1d.3xlarge stream.memory.z1d.6xlarge stream.memory.z1d.12xlarge stream.graphics-design.large stream.graphics-design.xlarge stream.graphics-design.2xlarge stream.graphics-design.4xlarge stream.graphics-desktop.2xlarge stream.graphics.g4dn.xlarge stream.graphics.g4dn.2xlarge stream.graphics.g4dn.4xlarge stream.graphics.g4dn.8xlarge stream.graphics.g4dn.12xlarge stream.graphics.g4dn.16xlarge stream.graphics.g5.xlarge stream.graphics.g5.2xlarge stream.graphics.g5.4xlarge stream.graphics.g5.8xlarge stream.graphics.g5.12xlarge stream.graphics.g5.16xlarge stream.graphics.g5.24xlarge stream.graphics-pro.4xlarge stream.graphics-pro.8xlarge stream.graphics-pro.16xlarge The following instance types are available for Elastic fleets: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 360000. The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. 
Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 36000. The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. Creates custom branding that customizes the appearance of the streaming application catalog page. The name of the stack for the theme. The links that are displayed in the footer of the streaming application catalog page. 
These links are helpful resources for users, such as the organization's IT support and product marketing sites. The title that is displayed at the top of the browser tab during users' application streaming sessions. The color theme that is applied to website links, text, and buttons. These colors are also applied as accents in the background for the streaming application catalog page. The organization logo that appears on the streaming application catalog page. The S3 location of the favicon. The favicon enables users to recognize their application streaming site in a browser full of tabs or bookmarks. It is displayed at the top of the browser tab for the application streaming site during users' streaming sessions. The theme object that contains the metadata of the custom branding. Deletes custom branding that customizes the appearance of the streaming application catalog page. The name of the stack for the theme. Retrieves a list that describes one or more specified stacks, if the stack names are provided. Otherwise, all stacks in the account are described. Retrieves a list that describes one or more specified stacks, if the stack names are provided. Otherwise, all stacks in the account are described. Retrieves a list that describes the theme for a specified stack. A theme is custom branding that customizes the appearance of the streaming application catalog page. The name of the stack for the theme. The theme object that contains the metadata of the custom branding. The amount of time that a streaming session remains active after users disconnect. If they try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 360000. The amount of time that a streaming session remains active after users disconnect. 
If they try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 36000. The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. Describes the errors that are returned when a new image can't be created. 
Indicates whether the image is using the latest AppStream 2.0 agent version or not. The supported instances families that determine which image a customer can use when the customer launches a fleet or image builder. The following instances families are supported: General Purpose Compute Optimized Memory Optimized Graphics Graphics Design Graphics Pro Graphics G4 Graphics G5 Indicates whether dynamic app providers are enabled within an AppStream 2.0 image or not. Indicates whether the image is shared with another account ID. The list of virtual private cloud (VPC) interface endpoint objects. Administrators can connect to the image builder only through the specified endpoints. Indicates whether the image builder is using the latest AppStream 2.0 agent version or not. Describes the permissions for an image. The stack that has the custom branding theme. The state of the theme. The browser tab page title. The color that is used for the website links, text, buttons, and catalog page background. The website links that display in the catalog page footer. The URL of the logo that displays in the catalog page header. The URL of the icon that displays at the top of a user's browser tab during streaming sessions. The time the theme was created. The custom branding theme, which might include a custom logo, website links, and other branding to display to users. The name of the websites that display in the catalog page footer. The URL of the websites that display in the catalog page footer. The website links that display in the catalog page footer. The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 360000. 
The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 36000. The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. The amount of time that users can be idle (inactive) before they are disconnected\n from their streaming session and the To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity. 
Updates custom branding that customizes the appearance of the streaming application catalog page. The name of the stack for the theme. The links that are displayed in the footer of the streaming application catalog page. These links are helpful resources for users, such as the organization's IT support and product marketing sites. The title that is displayed at the top of the browser tab during users' application streaming sessions. The color theme that is applied to website links, text, and buttons. These colors are also applied as accents in the background for the streaming application catalog page. The organization logo that appears on the streaming application catalog page. The S3 location of the favicon. The favicon enables users to recognize their application streaming site in a browser full of tabs or bookmarks. It is displayed at the top of the browser tab for the application streaming site during users' streaming sessions. Specifies whether custom branding should be applied to catalog page or not. The attributes to delete. The theme object that contains the metadata of the custom branding. Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session. This can be specified only for the This defaults to 20,971,520 (20 MB) when unspecified and the permission is This can only be specified for AlwaysOn and OnDemand fleets. The attribute is not supported on Elastic fleets. The value can be between 1 and 20,971,520 (20 MB). Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session. This can be specified only for the This defaults to 20,971,520 (20 MB) when unspecified and the permission is The value can be between 1 and 20,971,520 (20 MB). One or more subnet IDs, if applicable, separated by commas. 
A comma-separated list of subnet IDs for a virtual private cloud (VPC) where instances\n in the Auto Scaling group can be created. If you specify A comma-separated list of subnet IDs for a virtual private cloud (VPC). If you specify\n Cancels a job in an Batch job queue. Jobs that are in the\n A When you try to cancel an array parent job in Jobs that progressed to the Cancels a job in an Batch job queue. Jobs that are in a A When you try to cancel an array parent job in Jobs that progressed to the Unique identifier for the compute environment. Reserved. The details for the Amazon EKS cluster that supports the compute environment. Reserved. Specifies the updated infrastructure update policy for the compute environment. For more\n information about infrastructure updates, see Updating compute environments in\n the Batch User Guide. Reserved. A name for the input of the flow input node. The name of the flow input node that begins the prompt flow. A name for the output of the flow input node. The name of the output from the flow input node that begins the prompt flow. Contains information about an input into the flow. Contains information about an input into the prompt flow. Contains information about an input into the flow and what to do with it. This data type is used in the following API operations: \n InvokeFlow request\n Contains information about an input into the prompt flow and where to send it. This data type is used in the following API operations: \n InvokeFlow request\n The input for the flow input node. The input to send to the prompt flow input node. A name for the output of the flow. The content in the output. Contains information about the output node. This data type is used in the following API operations: \n InvokeFlow request\n Contains information about the content in an output from prompt flow invocation. This data type is used in the following API operations: \n InvokeFlow request\n The name of the node to which input was provided. 
The name of the flow output node that the output is from. The type of node to which input was provided. The type of the node that the output is from. The output of the node. The content in the output. Contains information about an output from flow invocation. This data type is used in the following API operations: \n InvokeFlow response\n Contains information about an output from prompt flow invocation. This data type is used in the following API operations: \n InvokeFlow response\n While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for Invokes an alias of a flow to run the inputs that you specify and return the output of each node as a stream. If there's an error, the error is returned. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide. Invokes an alias of a flow to run the inputs that you specify and return the output of each node as a stream. If there's an error, the error is returned. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide. The CLI doesn't support streaming operations in Amazon Bedrock, including Contains details of the foundation model usage. Provides details of the foundation model. Settings for how the model processes the prompt prior to retrieval and generation. The unique identifier of the trace. Contains details of the raw response from the foundation model output. Contains information about the foundation model output. The foundation model output from the orchestration step. The input for the orchestration step. The The The Contains information pertaining to the output from the foundation model that is being invoked. The foundation model's raw output content. Contains the raw output from the foundation model. 
Contains information about the input tokens from the foundation model usage. Contains information about the output tokens from the foundation model usage. Contains information of the usage of the foundation model. Sends messages to the specified Amazon Bedrock model. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide.\n To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide\n For example code, see Converse API examples in the Amazon Bedrock User Guide.\n This operation requires permission for the Sends messages to the specified Amazon Bedrock model. Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide.\n To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide\n For example code, see Converse API examples in the Amazon Bedrock User Guide.\n This operation requires permission for the Sends messages to the specified Amazon Bedrock model and returns\n the response in a stream. To find out if a model supports streaming, call GetFoundationModel\n and check the For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide.\n To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide\n For example code, see Conversation streaming example in the Amazon Bedrock User Guide.\n This operation requires permission for the Sends messages to the specified Amazon Bedrock model and returns\n the response in a stream. 
To find out if a model supports streaming, call GetFoundationModel\n and check the The CLI doesn't support streaming operations in Amazon Bedrock, including Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide.\n To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide\n For example code, see Conversation streaming example in the Amazon Bedrock User Guide.\n This operation requires permission for the The number of requests exceeds the limit. Resubmit your request later. The service isn't currently available. Try again later. Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream. To see if a model supports streaming, call GetFoundationModel\n and check the The CLI doesn't support For example code, see Invoke model with streaming code\n example in the Amazon Bedrock User Guide.\n This operation requires permissions to perform the Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream. To see if a model supports streaming, call GetFoundationModel\n and check the The CLI doesn't support streaming operations in Amazon Bedrock, including For example code, see Invoke model with streaming code\n example in the Amazon Bedrock User Guide.\n This operation requires permissions to perform the The number or frequency of requests exceeds the limit. Resubmit your request later. Your request was throttled because of service-wide limitations. Resubmit your request later or in a different region. 
You can also purchase Provisioned Throughput to increase the rate or number of tokens you can process. The request took too long to process. Processing time exceeded the model timeout length. The number of requests exceeds the service quota. Resubmit your request later. Your request exceeds the service quota for your account. You can view your quotas at Viewing service quotas. You can resubmit your request later. The service isn't currently available. Try again later. The number of requests exceeds the limit. Resubmit your request later. Your request was throttled because of service-wide limitations. Resubmit your request later or in a different region. You can also purchase Provisioned Throughput to increase the rate or number of tokens you can process. API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and model evaluation jobs that use human workers. To learn more about the requirements for creating a model evaluation job see, Model evaluations. API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and model evaluation jobs that use human workers. To learn more about the requirements for creating a model evaluation job see, Model evaluation. Copies a model to another region so that it can be used there. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide. The Amazon Resource Name (ARN) of the model to be copied. A name for the copied model. The ARN of the KMS key that you use to encrypt the model copy. Tags to associate with the target model. For more information, see Tag resources in the Amazon Bedrock User Guide. A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n\t Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. The Amazon Resource Name (ARN) of the model copy job. 
Creates a fine-tuning job to customize a base model. You specify the base foundation model and the location of the training data.\n After the model-customization job completes successfully, your custom model resource will be ready to use. Amazon Bedrock returns validation loss metrics and output generations after the job completes.\n For information on the format of training and validation data, see Prepare the datasets. \n Model-customization jobs are asynchronous and the completion time depends on the base model and the training/validation data size.\n To monitor a job, use the For more information, see Custom models in the Amazon Bedrock User Guide. Creates a fine-tuning job to customize a base model. You specify the base foundation model and the location of the training data.\n After the model-customization job completes successfully, your custom model resource will be ready to use. Amazon Bedrock returns validation loss metrics and output generations after the job completes.\n For information on the format of training and validation data, see Prepare the datasets. \n Model-customization jobs are asynchronous and the completion time depends on the base model and the training/validation data size.\n To monitor a job, use the For more information, see Custom models in the Amazon Bedrock User Guide. VPC configuration (optional). Configuration parameters for the\n private Virtual Private Cloud (VPC) that contains the resources you are using for this job. VPC configuration (optional). Configuration parameters for the\n private Virtual Private Cloud (VPC) that contains the resources you are using for this job. Creates dedicated throughput for a base or custom model with the model units and for the duration that you specify. For pricing details, see Amazon Bedrock Pricing. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. Creates dedicated throughput for a base or custom model with the model units and for the duration that you specify. 
For pricing details, see Amazon Bedrock Pricing. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. Number of model units to allocate. A model unit delivers a specific throughput level for the specified model. The throughput level of a model unit specifies the total number of input and output tokens that it can process and generate within a span of one minute. By default, your account has no model units for purchasing Provisioned Throughputs with commitment. You must first visit the Amazon Web Services support center to request MUs. For model unit quotas, see Provisioned Throughput quotas in the Amazon Bedrock User Guide. For more information about what an MU specifies, contact your Amazon Web Services account manager. Number of model units to allocate. A model unit delivers a specific throughput level for the specified model. The throughput level of a model unit specifies the total number of input and output tokens that it can process and generate within a span of one minute. By default, your account has no model units for purchasing Provisioned Throughputs with commitment. You must first visit the Amazon Web Services support center to request MUs. For model unit quotas, see Provisioned Throughput quotas in the Amazon Bedrock User Guide. For more information about what an MU specifies, contact your Amazon Web Services account manager. The Amazon Resource Name (ARN) or name of the model to associate with this Provisioned Throughput. For a list of models for which you can purchase Provisioned Throughput, see Amazon Bedrock model IDs for purchasing Provisioned Throughput in the Amazon Bedrock User Guide. The Amazon Resource Name (ARN) or name of the model to associate with this Provisioned Throughput. For a list of models for which you can purchase Provisioned Throughput, see Amazon Bedrock model IDs for purchasing Provisioned Throughput in the Amazon Bedrock User Guide. The commitment duration requested for the Provisioned Throughput. 
Billing occurs hourly and is discounted for longer commitment terms. To request a no-commit Provisioned Throughput, omit this field. Custom models support all levels of commitment. To see which base models support no commitment, see Supported regions and models for Provisioned Throughput in the Amazon Bedrock User Guide The commitment duration requested for the Provisioned Throughput. Billing occurs hourly and is discounted for longer commitment terms. To request a no-commit Provisioned Throughput, omit this field. Custom models support all levels of commitment. To see which base models support no commitment, see Supported regions and models for Provisioned Throughput in the Amazon Bedrock User Guide\n Specifies whether to carry out continued pre-training of a model or whether to fine-tune it. For more information, see Custom models. The unique identifier of the account that owns the model. Deletes a custom model that you created earlier. For more information, see Custom models in the Amazon Bedrock User Guide. Deletes a custom model that you created earlier. For more information, see Custom models in the Amazon Bedrock User Guide. Deletes a Provisioned Throughput. You can't delete a Provisioned Throughput before the commitment term is over. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. Deletes a Provisioned Throughput. You can't delete a Provisioned Throughput before the commitment term is over. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. Get the properties associated with an Amazon Bedrock custom model that you have created. For more information, see Custom models in the Amazon Bedrock User Guide. Get the properties associated with an Amazon Bedrock custom model that you have created. For more information, see Custom models in the Amazon Bedrock User Guide. Retrieves the properties associated with a model evaluation job, including the\n status of the job. 
For more information, see Model evaluations. Retrieves the properties associated with a model evaluation job, including the\n status of the job. For more information, see Model evaluation. Retrieves information about a model copy job. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide. The Amazon Resource Name (ARN) of the model copy job. The Amazon Resource Name (ARN) of the model copy job. The status of the model copy job. The time at which the model copy job was created. The Amazon Resource Name (ARN) of the copied model. The name of the copied model. The unique identifier of the account that the model being copied originated from. The Amazon Resource Name (ARN) of the original model being copied. The Amazon Resource Name (ARN) of the KMS key encrypting the copied model. The tags associated with the copied model. An error message for why the model copy job failed. The name of the original model being copied. Retrieves the properties associated with a model-customization job, including the status of the job.\n For more information, see Custom models in the Amazon Bedrock User Guide. Retrieves the properties associated with a model-customization job, including the status of the job.\n For more information, see Custom models in the Amazon Bedrock User Guide. The status of the job. A successful job transitions from in-progress to completed when the output model is ready to use.\n If the job failed, the failure message contains information about why the job failed. The status of the job. A successful job transitions from in-progress to completed when the output model is ready to use.\n If the job failed, the failure message contains information about why the job failed. Returns details for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. Returns details for a Provisioned Throughput. 
For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. The type of PII entity. For example, Social Security Number. The type of PII entity. For example, Social Security Number. In a model evaluation job that uses human workers you must\n define the name of the metric, and how you want that metric rated\n In a model evaluation job that uses human workers you must \n define the name of the metric, and how you want that metric rated\n Returns a list of the custom models that you have created with the For more information, see Custom models in the Amazon Bedrock User Guide. Returns a list of the custom models that you have created with the For more information, see Custom models in the Amazon Bedrock User Guide. Maximum number of results to return in the response. The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the Continuation token from the previous response, for Amazon Bedrock to list the next set of results. If the total number of results is greater than the The sort order of the results. Return custom models depending on if the current account owns them ( Continuation token for the next request to list the next set of results. If the total number of results is greater than the Lists Amazon Bedrock foundation models that you can use. You can filter the results with the request parameters. For more information, see Foundation models in the Amazon Bedrock User Guide. Lists Amazon Bedrock foundation models that you can use. You can filter the results with the request parameters. For more information, see Foundation models in the Amazon Bedrock User Guide. Return models that support the customization type that you specify. For more information, see Custom models in the Amazon Bedrock User Guide. Return models that support the customization type that you specify. 
For more information, see Custom models in the Amazon Bedrock User Guide. Return models that support the inference type that you specify. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. Return models that support the inference type that you specify. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. Returns a list of model copy jobs that you have submitted. You can filter the jobs to return based on\n one or more criteria. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide. Filters for model copy jobs created after the specified time. Filters for model copy jobs created before the specified time. Filters for model copy jobs whose status matches the value that you specify. Filters for model copy jobs in which the account that the source model belongs to is equal to the value that you specify. Filters for model copy jobs in which the Amazon Resource Name (ARN) of the source model to is equal to the value that you specify. Filters for model copy jobs in which the name of the copied model contains the string that you specify. The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the If the total number of results is greater than the The field to sort by in the returned list of model copy jobs. Specifies whether to sort the results in ascending or descending order. If the total number of results is greater than the A list of information about each model copy job. Returns a list of model customization jobs that you have submitted. You can filter the jobs to return based on\n one or more criteria. For more information, see Custom models in the Amazon Bedrock User Guide. Returns a list of model customization jobs that you have submitted. You can filter the jobs to return based on\n one or more criteria. 
For more information, see Custom models in the Amazon Bedrock User Guide. Maximum number of results to return in the response. The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the Continuation token from the previous response, for Amazon Bedrock to list the next set of results. If the total number of results is greater than the Page continuation token to use in the next request. If the total number of results is greater than the Lists the Provisioned Throughputs in the account. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. Lists the Provisioned Throughputs in the account. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. List the tags associated with the specified resource. For more information, see Tagging resources in the Amazon Bedrock User Guide. List the tags associated with the specified resource. For more information, see Tagging resources in the Amazon Bedrock User Guide. The Amazon Resource Name (ARN) of the model copy job. The status of the model copy job. The time that the model copy job was created. The Amazon Resource Name (ARN) of the copied model. The name of the copied model. The unique identifier of the account that the model being copied originated from. The Amazon Resource Name (ARN) of the original model being copied. The Amazon Resource Name (ARN) of the KMS key used to encrypt the copied model. Tags associated with the copied model. If a model fails to be copied, a message describing why the job failed is included here. The name of the original model being copied. Contains details about each model copy job. This data type is used in the following API operations: Stops an active model customization job. For more information, see Custom models in the Amazon Bedrock User Guide. Stops an active model customization job. 
For more information, see Custom models in the Amazon Bedrock User Guide. Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide. Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide. The request contains more tags than can be associated with a resource (50 tags per resource).\n The maximum number of tags includes both existing tags and those included in your current request. The request contains more tags than can be associated with a resource (50 tags per resource). \n The maximum number of tags includes both existing tags and those included in your current request. Remove one or more tags from a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide. Remove one or more tags from a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide. Updates the name or associated model for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. Updates the name or associated model for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide. Columns that must meet a specific threshold value (after an aggregation function is\n applied to it) for each output row to be returned. An indicator as to whether additional analyses (such as Clean Rooms ML) can be applied to the output of the direct query. The\n An indicator as to whether additional analyses (such as Clean Rooms ML) can be applied to the output of the direct query. A list of columns that aren't allowed to be shown in the query output. A type of analysis rule that enables the table owner to approve custom SQL queries on\n their configured tables. It supports differential privacy. The columns that query runners are allowed to use in an INNER JOIN statement. The query constraints of the analysis rule ID mapping table. 
The columns that query runners are allowed to select, group by, or filter by. Defines details for the analysis rule ID mapping table. Columns that can be listed in the output. An indicator as to whether additional analyses (such as Clean Rooms ML) can be applied to the output of the direct query. Analysis rule type that enables custom SQL queries on a configured table. The ID mapping table. The names for the schema objects to\n retrieve. The names for the schema objects to retrieve. The unique identifier of the collaboration ID namespace association. The Amazon Resource Name (ARN) of the collaboration ID namespace association. The unique identifier of the collaboration that contains the collaboration ID namespace association. The Amazon Resource Name (ARN) of the collaboration that contains the collaboration ID namespace association. The name of the collaboration ID namespace association. The description of the collaboration ID namespace association. The unique identifier of the Amazon Web Services account that created the collaboration ID namespace association. The time at which the collaboration ID namespace association was created. The most recent time at which the collaboration ID namespace was updated. The input reference configuration that's necessary to create the collaboration ID namespace association. The input reference properties that are needed to create the collaboration ID namespace association. Defines details for the collaboration ID namespace association. The Amazon Resource Name (ARN) of the collaboration ID namespace association. The time at which the collaboration ID namespace association was created. The unique identifier of the collaboration ID namespace association. The most recent time at which the collaboration ID namespace association was updated. The Amazon Resource Name (ARN) of the collaboration that contains this collaboration ID namespace association. 
The unique identifier of the collaboration that contains this collaboration ID namespace association. The Amazon Web Services account that created this collaboration ID namespace association. The input reference configuration that's used to create the collaboration ID namespace association. The name of the collaboration ID namespace association. The description of the collaboration ID namespace association. The input reference properties that are used to create the collaboration ID namespace association. Provides summary information about the collaboration ID namespace association. The direct analysis configuration details. The configuration details. A description of the configured table association. The analysis rule types for the configured table association. A configured table association links a configured table to a collaboration. The membership identifier for the configured table association analysis rule. \n The\n unique identifier for the configured table association. \n The\n Amazon Resource Name (ARN) of the configured table association. The policy of the configured table association analysis rule. The type of the configured table association analysis rule. The creation time of the configured table association analysis rule. The update time of the configured table association analysis rule. An\n analysis rule for a configured table association. This analysis rule specifies how data\n from the table can be used within its associated\n collaboration.\n In the console, the The list of collaboration members who are allowed to receive results of queries run\n with this configured table. The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output. The\n The configured table association analysis rule applied to a configured table with the aggregation analysis rule. The list of\n collaboration members who are allowed\n to\n receive results of queries run with this configured table. 
The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output. The configured table association analysis rule applied to a configured table with the custom analysis rule. The list of collaboration members who are allowed to receive results of queries run\n with this configured table. The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output. The configured table association analysis rule applied to a configured table with the list analysis rule. The policy for the configured table association analysis rule. Controls on the query specifications that can be run on an associated configured table. Analysis rule type that enables only list queries on a configured table. Analysis rule type that enables only aggregation queries on a configured table. Analysis rule type that enables the table owner to approve custom SQL queries on their configured tables. It supports differential privacy. Controls on the query specifications that can be run on an associated configured table. The collaboration\n creator's payment responsibilities set by the collaboration creator. If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer. The collaboration creator's payment responsibilities set by the collaboration creator. If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer. The entire created collaboration object. The collaboration. The entire created configured table analysis rule object. The analysis rule policy that was created for the configured table. The entire created analysis rule. The analysis rule that was created for the configured table. Creates a new analysis rule for an associated configured table. 
A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID. The unique ID for the configured table association. Currently accepts the\n configured table association ID. The type of analysis rule. The analysis rule policy that was created for the configured table\n association. The analysis rule for the configured table association.\n In the console, the\n The entire configured table association object. The configured table association. Creates a membership for a specific collaboration identifier and joins the\n collaboration. Creates an ID mapping table. The unique ID for the associated collaboration. The unique identifier of the membership that contains the ID mapping table. An indicator as to whether query logging has been enabled or disabled for the\n membership. A name for the ID mapping table. A description of the ID mapping table. The input reference configuration needed to create the ID mapping table. An optional label that you can assign to a resource when you create it. Each tag\n consists of a key and an optional value, both of which you define. When you use tagging,\n you can also use tag-based access control in IAM policies to control access\n to this resource. The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. This value is used to encrypt the mapping table data that is stored by Clean Rooms. The ID mapping table that was created. Creates an ID namespace association. The unique identifier of the membership that contains the ID namespace association. The input reference configuration needed to create the ID namespace association. An optional label that you can assign to a resource when you create it. Each tag\n consists of a key and an optional value, both of which you define. When you use tagging,\n you can also use tag-based access control in IAM policies to control access\n to this resource. The name for the ID namespace association. 
The description of the ID namespace association. The configuration settings for the ID mapping table. The ID namespace association that was created. Creates a membership for a specific collaboration identifier and joins the\n collaboration. The unique ID for the associated collaboration. An indicator as to whether query logging has been enabled or disabled for the\n membership. An optional label that you can assign to a resource when you create it. Each tag\n consists of a key and an optional value, both of which you define. When you use tagging,\n you can also use tag-based access control in IAM policies to control access\n to this resource. The default\n protected query result configuration as specified by the member who can receive\n results. The default protected query result configuration as specified by the member who can\n receive results. The payment\n responsibilities accepted by the collaboration member. Not required if the collaboration member has the member ability to run queries. Required if the collaboration member doesn't have the member ability to run queries but\n is configured as a payer by the collaboration creator. The payment responsibilities accepted by the collaboration member. Not required if the collaboration member has the member ability to run queries. Required if the collaboration member doesn't have the member ability to run queries but\n is configured as a payer by the collaboration creator. 
Indicates whether encrypted tables can contain cleartext data\n ( Indicates whether encrypted tables can contain cleartext data ( Indicates whether Fingerprint columns can contain duplicate entries\n ( Indicates whether Fingerprint columns can contain duplicate entries ( Indicates whether Fingerprint columns can be joined on any other Fingerprint column with\n a different name\n ( Indicates whether Fingerprint columns can be joined on any other Fingerprint column with\n a different name ( Indicates whether NULL values are to be copied as NULL to encrypted tables\n ( Indicates whether NULL values are to be copied as NULL to encrypted tables\n ( Deletes\n an analysis rule for a configured table association. A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID. The\n identifier for the configured table association that's related to the analysis rule that you\n want to delete. The\n type of the analysis rule that you want to delete. The empty output for a successful deletion. Removes the specified member from a collaboration. The removed member is placed in the\n Removed status and can't interact with the collaboration. The removed member's data is\n inaccessible to active members of the collaboration. Deletes an ID mapping table. The unique identifier of the ID mapping table that you want to delete. The unique identifier of the membership that contains the ID mapping table that you want to delete. Deletes an ID namespace association. The unique identifier of the ID namespace association that you want to delete. The unique identifier of the membership that contains the ID namespace association that you want to delete. Removes the specified member from a collaboration. The removed member is placed in the\n Removed status and can't interact with the collaboration. The removed member's data is\n inaccessible to active members of the collaboration. 
The epsilon and noise parameter values that you want to update in the differential privacy template. The account IDs for the member who received the results of a protected query. The direct analysis configuration details. Retrieves an ID namespace association from a specific collaboration. The unique identifier of the collaboration that contains the ID namespace association that you want to retrieve. The unique identifier of the ID namespace association that you want to retrieve. The ID namespace association that you requested. \n Retrieves\n the analysis rule for a configured table association. A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID. The unique ID for the configured table association to retrieve. Currently accepts the\n configured table ID. \n The\n identifier for the configured table association that's related to the analysis\n rule. A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID. \n The\n type of analysis rule that you want to retrieve. The\n analysis rule for the configured table association. In the console, the\n The unique ID for the configured table association to retrieve. Currently accepts the\n configured table ID. A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID. The entire configured table association object. Retrieves an ID mapping table. The unique identifier of the ID mapping table identifier that you want to retrieve. The unique identifier of the membership that contains the ID mapping table that you want to retrieve. The ID mapping table that you requested. Retrieves an ID namespace association. The unique identifier of the ID namespace association that you want to retrieve. The unique identifier of the membership that contains the ID namespace association that you want to retrieve. 
The ID namespace association that you requested. The name of the Glue table. The name of the database the Glue table belongs to. A reference to a table within an Glue data catalog. An indicator as to whether you can use your column as a dimension column in the ID mapping table ( Default is The configuration settings for the ID mapping table. The unique identifier of the ID mapping table. The Amazon Resource Name (ARN) of the ID mapping table. The input reference configuration for the ID mapping table. The unique identifier of the membership resource for the ID mapping table. The Amazon Resource Name (ARN) of the membership resource for the ID mapping table. The unique identifier of the collaboration that contains this ID mapping table. The Amazon Resource Name (ARN) of the collaboration that contains this ID mapping table. The description of the ID mapping table. The name of the ID mapping table. The time at which the ID mapping table was created. The most recent time at which the ID mapping table was updated. The input reference properties for the ID mapping table. The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. Describes information about the ID mapping table. The Amazon Resource Name (ARN) of the referenced resource in Entity Resolution. Valid values are ID mapping workflow ARNs. When When Provides the input reference configuration for the ID mapping table. The input source of the ID mapping table. The input reference properties for the ID mapping table. The unique identifier of the ID namespace association. The type of the input source of the ID mapping table. The input source of the ID mapping table. Defines which ID namespace associations are used to create the ID mapping table. Additional properties that are specific to the type of the associated schema. The Amazon Resource Name (ARN) of the collaboration that contains this ID mapping table. The unique identifier of the collaboration that contains this ID mapping table. 
The unique identifier of the membership resource for this ID mapping table. The Amazon Resource Name (ARN) of the membership resource for this ID mapping table. The time at which this ID mapping table was created. The most recent time at which this ID mapping table was updated. The unique identifier of this ID mapping table. The Amazon Resource Name (ARN) of this ID mapping table. The description of this ID mapping table. The input reference configuration for the ID mapping table. The name of this ID mapping table. Detailed information about the ID mapping table. The unique identifier for this ID namespace association. The Amazon Resource Name (ARN) of the ID namespace association. The unique identifier of the membership resource for this ID namespace association. The Amazon Resource Name (ARN) of the membership resource for this ID namespace association. The unique identifier of the collaboration that contains this ID namespace association. The Amazon Resource Name (ARN) of the collaboration that contains this ID namespace association. The name of this ID namespace association. The description of the ID namespace association. The time at which the ID namespace association was created. The most recent time at which the ID namespace association was updated. The input reference configuration for the ID namespace association. The input reference properties for the ID namespace association. The configuration settings for the ID mapping table. Provides information to create the ID namespace association. The Amazon Resource Name (ARN) of the Entity Resolution resource that is being associated to the collaboration. Valid resource ARNs are from the ID namespaces that you own. When When Provides the information for the ID namespace association input reference configuration. The ID namespace type for this ID namespace association. Defines how ID mapping workflows are supported for this ID namespace association. 
Provides the information for the ID namespace association input reference properties. The ID namespace type for this ID namespace association. Detailed information about the ID namespace association input reference properties. The unique identifier of the membership resource for this ID namespace association. The Amazon Resource Name (ARN) of the membership resource for this ID namespace association. The Amazon Resource Name (ARN) of the collaboration that contains this ID namespace association. The unique identifier of the collaboration that contains this ID namespace association. The time at which this ID namespace association was created. The most recent time at which this ID namespace association has been updated. The unique identifier of this ID namespace association. The Amazon Resource Name (ARN) of this ID namespace association. The input reference configuration details for this ID namespace association. The name of the ID namespace association. The description of the ID namespace association. The input reference properties for this ID namespace association. Detailed information about the ID namespace association. The name of the Glue table. The name of the database the Glue table belongs to. A reference to a table within a Glue data catalog. Returns a list of the ID namespace associations in a collaboration. The unique identifier of the collaboration that contains the ID namespace associations that you want to retrieve. The pagination token that's used to fetch the next set of results. The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met. The token value provided to access the next page of results. The summary information of the collaboration ID namespace associations that you requested. Lists configured table associations for a membership. 
A unique identifier for the membership to list configured table associations for.\n Currently accepts the membership ID. The token value retrieved from a previous call to access the next page of\n results. The maximum size of the results that is returned per call. The retrieved list of configured table associations. The token value retrieved from a previous call to access the next page of\n results. Lists configured tables. The token value retrieved from a previous call to access the next page of\n results. The maximum size of the results that is returned per call. The configured tables listed by the request. The token value retrieved from a previous call to access the next page of\n results. Lists configured table associations for a membership. Returns a list of ID mapping tables. A unique identifier for the membership to list configured table associations for.\n Currently accepts the membership ID. The unique identifier of the membership that contains the ID mapping tables that you want to view. The token value retrieved from a previous call to access the next page of\n results. The pagination token that's used to fetch the next set of results. The maximum size of the results that is returned per call. The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met. The retrieved list of configured table associations. The summary information of the ID mapping tables that you requested. The token value retrieved from a previous call to access the next page of\n results. The token value provided to access the next page of results. Lists configured tables. Returns a list of ID namespace associations. The unique identifier of the membership that contains the ID namespace association that you want to view. The token value retrieved from a previous call to access the next page of\n results. 
The pagination token that's used to fetch the next set of results. The maximum size of the results that is returned per call. The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met. The configured tables listed by the request. The token value provided to access the next page of results. The token value retrieved from a previous call to access the next page of\n results. The summary information of the ID namespace associations that you requested. The collaboration\n member's payment responsibilities set by the collaboration creator.\n If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer. The collaboration member's payment responsibilities set by the collaboration creator. If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer. The status of the member.\n The status of the member. The collaboration\n member's payment responsibilities set by the collaboration creator.\n The collaboration member's payment responsibilities set by the collaboration creator. The status of the\n membership. The status of the membership. The payment\n responsibilities accepted by the collaboration member. The payment responsibilities accepted by the collaboration member. The payment\n responsibilities accepted by the collaboration member for query compute\n costs. The payment responsibilities accepted by the collaboration member for query compute\n costs. An object\n representing the payment responsibilities accepted by the collaboration\n member. An object representing the payment responsibilities accepted by the collaboration\n member. 
Indicates whether\n the collaboration member has accepted to pay for query compute costs ( If the collaboration creator has not specified anyone to pay for query compute costs,\n then the member who can query is the default payer. An error message is returned for the following reasons: If you set the value to If you set the value to Indicates whether the collaboration member has accepted to pay for query compute costs\n ( If the collaboration creator has not specified anyone to pay for query compute costs,\n then the member who can query is the default payer. An error message is returned for the following reasons: If you set the value to If you set the value to An object\n representing the payment responsibilities accepted by the collaboration member for query\n compute costs. An object representing the payment responsibilities accepted by the collaboration member\n for query compute costs. The status of the\n membership. The status of the membership. The payment\n responsibilities accepted by the collaboration member. The payment responsibilities accepted by the collaboration member. The collaboration\n member's payment responsibilities set by the collaboration creator for query compute\n costs. The collaboration member's payment responsibilities set by the collaboration creator for\n query compute costs. An object representing the collaboration member's payment responsibilities set by the\n collaboration creator. Defines the information that's necessary to populate an ID mapping table. The unique identifier of the ID mapping table that you want to populate. The unique identifier of the membership that contains the ID mapping table that you want to populate. The unique identifier of the mapping job that will populate the ID mapping table. An object\n representing the collaboration member's payment responsibilities set by the collaboration\n creator. Specifies the epsilon and noise parameters for the privacy budget template. 
Specifies the\n epsilon\n and noise parameters for the privacy budget template. The\n unique identifier for the account. Contains configuration details for the protected query member output. Required configuration for a protected query with an `S3` output type. Required configuration for a protected query with an\n Required configuration for a protected query with a The query string to be submitted. The duration of the Protected Query, from creation until query completion. The duration of the protected query, from creation until query completion. The status of the protected query. Valid values are `SUBMITTED`, `STARTED`, `CANCELLED`,\n `CANCELLING`, `FAILED`, `SUCCESS`, `TIMED_OUT`. The receiver configuration. Indicates whether\n the collaboration creator has configured the collaboration member to pay for query compute\n costs ( Exactly one member can be configured to pay for query compute costs. An error is\n returned if the collaboration creator sets a If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer. An error is returned if\n the collaboration creator sets a Indicates whether the collaboration creator has configured the collaboration member to\n pay for query compute costs ( Exactly one member can be configured to pay for query compute costs. An error is\n returned if the collaboration creator sets a If the collaboration creator hasn't specified anyone as the member paying for query\n compute costs, then the member who can query is the default payer. An error is returned if\n the collaboration creator sets a An object\n representing the collaboration member's payment responsibilities set by the collaboration\n creator for query compute costs. An object representing the collaboration member's payment responsibilities set by the\n collaboration creator for query compute costs. 
An array of column names that specifies which columns are required in the JOIN statement. Provides any necessary query constraint information. The columns that are required to overlap. Provides the name of the columns that are required to overlap. The type of analysis for the protected query. The results of the query can be analyzed directly ( The configuration details of the receiver configuration. The receiver configuration for a protected query. Details about the status of the schema. Currently, only one entry is present. The schema type properties. The status of the schema. The status of the schema, indicating if it is ready to query. The configuration details of the schema analysis rule for the given type. The type of analysis that can be performed on the schema. A schema can have an An explanation of the schema status reason code. A reason why the schema status is set to its current value. An explanation of the schema status reason code. A reason why the schema status is set to its current value. The ID mapping table for the schema type properties. Information about the schema type properties. \n Updates\n the analysis rule for a configured table association. A unique identifier for the membership that the configured table association belongs to.\n Currently accepts the membership ID. The identifier for the configured table association to update. The analysis rule type\n that\n you want to update. \n The\n updated analysis rule policy for the configured table association. \n The\n updated analysis rule for the configured table association. In the console, the\n Provides the details that are necessary to update an ID mapping table. The unique identifier of the ID mapping table that you want to update. The unique identifier of the membership that contains the ID mapping table that you want to update. A new description for the ID mapping table. The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. The updated ID mapping table. 
Provides the details that are necessary to update an ID namespace association. The unique identifier of the ID namespace association that you want to update. The unique identifier of the membership that contains the ID namespace association that you want to update. A new name for the ID namespace association. A new description for the ID namespace association. The configuration settings for the ID mapping table. The updated ID namespace association. Defines the Amazon S3 bucket where the seed audience for the generating audience is stored. A valid data source is a JSON line file in the following format: \n \n \n Defines the Amazon S3 bucket where the seed audience for the generating audience is stored. A valid data source is a JSON line file in the following format: \n \n \n The ARN of the IAM role that can read the Amazon S3 bucket where the training data is stored. The ARN of the IAM role that can read the Amazon S3 bucket where the seed audience is stored. The protected SQL query parameters. Configure the list of audience output sizes that can be created. A request to StartAudienceGenerationJob that uses this configured audience model must have an Returns the relevance scores at these audience sizes when used in the GetAudienceGenerationJob for a specified audience generation job and configured audience model. Specifies the list of allowed The tags that are associated to this audience generation job. The unique identifier of the protected query for this audience generation job. The query string to be submitted. The Amazon Resource Name (ARN) associated with the analysis template within a\n collaboration. The protected query SQL parameters. The parameters for the SQL type Protected Query. Imports the source repository credentials for an CodeBuild project that has its\n source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository. 
Imports the source repository credentials for an CodeBuild project that has its\n source code stored in a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket repository. For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket,\n this is either the access token or the app password. For the For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket,\n this is either the access token or the app password. For the The type of authentication used to connect to a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or\n Bitbucket repository. An OAUTH connection is not supported by the API and must be\n created using the CodeBuild console. Note that CODECONNECTIONS is only valid for \n GitLab and GitLab Self Managed. The type of authentication used to connect to a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or\n Bitbucket repository. An OAUTH connection is not supported by the API and must be\n created using the CodeBuild console. Information about the authorization settings for CodeBuild to access the source code to be\n built. This information is for the CodeBuild console's use only. Your code should not get or set\n this information directly. Information about the authorization settings for CodeBuild to access the source code to be\n built. The authorization type to use. Valid options are OAUTH or CODECONNECTIONS. The authorization type to use. Valid options are OAUTH, CODECONNECTIONS, or SECRETS_MANAGER. Information about the authorization settings for CodeBuild to access the source code to be\n built. This information is for the CodeBuild console's use only. Your code should not get or set\n this information directly. Information about the authorization settings for CodeBuild to access the source code to be\n built. The type of authentication used by the credentials. Valid options are OAUTH,\n BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS. 
The type of authentication used by the credentials. Valid options are OAUTH,\n BASIC_AUTH, PERSONAL_ACCESS_TOKEN, CODECONNECTIONS, or SECRETS_MANAGER. The connection ARN if your serverType type is GITLAB or GITLAB_SELF_MANAGED and your authType is CODECONNECTIONS. The connection ARN if your authType is CODECONNECTIONS or SECRETS_MANAGER. The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to\n programmatically retrieve a key ID. For more information about acceptable values for kmsKeyID, see \n KeyId in the Decrypt API description in \n the Key Management Service API Reference. If no key is specified, the default The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to\n programmatically retrieve a key ID. For more information about acceptable values for kmsKeyID, see \n KeyId in the Decrypt API description in \n the Key Management Service API Reference. If no key is specified, the default Information about the type of an object in a merge operation. Any message associated with the exception. The requested action is not allowed. The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to\n programmatically retrieve a key ID. For more information about acceptable values for keyID, see \n KeyId in the Decrypt API description in \n the Key Management Service API Reference. The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to\n programmatically retrieve a key ID. For more information about acceptable values for keyID, see \n KeyId in the Decrypt API description in \n the Key Management Service API Reference. The conditions that are configured as entry conditions. The conditions for making checks for entry to a stage. The action to be done when the condition is met. For example, rolling back an execution for a failure condition. 
The rules that make up the condition. The condition for the stage. A condition is made up of the rules and the result for the condition. The status of the run for a condition. The summary of information about a run for a condition. The last status change of the condition. The run of a condition. Unable to override because the condition does not allow overrides. The state of the latest run of the rule. The state of the rules for the condition. Information about the state of the condition. The specified result for when the failure conditions are met, such as rolling back the\n stage. The conditions that are configured as failure conditions. Gets the set of key-value pairs (metadata) that are used to manage the\n resource. Lists the rule executions that have occurred in a pipeline configured for conditions with rules. The Amazon Resource Name (ARN) of the resource to get tags for. The name of the pipeline for which you want to get execution summary\n information. The token that was returned from the previous API call, which would be used to return\n the next page of the list. The ListTagsforResource call lists all available tags in one\n call and does not use pagination. Input information used to filter rule execution history. The maximum number of results to return in a single call. The tags for the resource. The maximum number of results to return in a single call. To retrieve the remaining\n results, make another call with the returned nextToken value. Pipeline history is\n limited to the most recent 12 months, based on pipeline execution start times. Default\n value is 100. If the amount of returned information is significantly large, an identifier is also\n returned and can be used in a subsequent API call to return the next page of the list.\n The ListTagsforResource call lists all available tags in one call and does not use\n pagination. 
The token that was returned from the previous The detail returned for each webhook, such as the webhook authentication type and\n filter rules. A unique URL generated by CodePipeline. When a POST request is made to this\n URL, the defined pipeline is started as long as the body of the post request satisfies\n the defined authentication and filtering conditions. Deleting and re-creating a webhook\n makes the old URL invalid and generates a new one. The text of the error message about the webhook. The number code of the error. The date and time a webhook was last successfully triggered, in timestamp\n format. The Amazon Resource Name (ARN) of the webhook. Details about the output for listing rule executions. Specifies the tags applied to the webhook. A token that can be used in the next The detail returned for each webhook after listing webhooks, such as the webhook\n URL, the webhook name, and the webhook ARN. Gets a listing of all the webhooks in this Amazon Web Services Region for this\n account. The output lists all webhooks and includes the webhook URL and ARN and the\n configuration for each webhook. Lists the rules for the condition. The token that was returned from the previous ListWebhooks call, which can be used\n to return the next set of webhooks in the list. The rule owner to filter on. The maximum number of results to return in a single call. To retrieve the remaining\n results, make another call with the returned nextToken value. The rule Region to filter on. The JSON detail returned for each webhook in the list output for the ListWebhooks\n call. Lists the rules that are configured for the condition. Gets the set of key-value pairs (metadata) that are used to manage the\n resource. The Amazon Resource Name (ARN) of the resource to get tags for. The token that was returned from the previous API call, which would be used to return\n the next page of the list. 
The ListTagsforResource call lists all available tags in one\n call and does not use pagination. The maximum number of results to return in a single call. The tags for the resource. If the amount of returned information is significantly large, an identifier is also\n returned and can be used in a subsequent API call to return the next page of the list.\n The ListTagsforResource call lists all available tags in one call and does not use\n pagination. The detail returned for each webhook, such as the webhook authentication type and\n filter rules. A unique URL generated by CodePipeline. When a POST request is made to this\n URL, the defined pipeline is started as long as the body of the post request satisfies\n the defined authentication and filtering conditions. Deleting and re-creating a webhook\n makes the old URL invalid and generates a new one. The text of the error message about the webhook. The number code of the error. The date and time a webhook was last successfully triggered, in timestamp\n format. The Amazon Resource Name (ARN) of the webhook. Specifies the tags applied to the webhook. The detail returned for each webhook after listing webhooks, such as the webhook\n URL, the webhook name, and the webhook ARN. Gets a listing of all the webhooks in this Amazon Web Services Region for this\n account. The output lists all webhooks and includes the webhook URL and ARN and the\n configuration for each webhook. The token that was returned from the previous ListWebhooks call, which can be used\n to return the next set of webhooks in the list. The maximum number of results to return in a single call. To retrieve the remaining\n results, make another call with the returned nextToken value. The JSON detail returned for each webhook in the list output for the ListWebhooks\n call. 
If the amount of returned information is significantly large, an identifier is also\n returned and can be used in a subsequent ListWebhooks call to return the next set of\n webhooks in the list. Used to override a stage condition. The name of the pipeline with the stage that will override the condition. The name of the stage for the override. The ID of the pipeline execution for the override. The type of condition to override for the stage, such as entry conditions, failure conditions, or success conditions. The name of the S3 bucket. The key of the object in the S3 bucket, which uniquely identifies the object in the\n bucket. The location of the S3 bucket that contains a revision. The Amazon S3 artifact bucket for an action's artifacts. The artifact name. The Amazon S3 artifact location for an action's artifacts. The name of the action that processed the revision to the source\n artifact. The name of the rule configuration property. The system-generated unique ID that identifies the revision number of the\n artifact. Whether the configuration property is a required value. Summary information about the most recent revision of the artifact. For GitHub and\n CodeCommit repositories, the commit message. For Amazon S3 buckets or actions,\n the user-provided content of a Whether the configuration property is a key. The commit ID for the artifact revision. For artifacts stored in GitHub or\n CodeCommit repositories, the commit ID is linked to a commit details page. Whether the configuration property is secret. When updating a pipeline, passing * * * * * without changing any other values of\n the action preserves the previous value of the secret. Information about the version (or revision) of a source artifact that initiated a\n pipeline execution. The name of the action where the override will be applied. Indicates whether the property can be queried. 
If you create a pipeline with a condition and rule, and that rule contains a queryable property, the value for that configuration property is subject to other\n restrictions. The value must be less than or equal to twenty (20) characters. The value\n can contain only alphanumeric characters, underscores, and hyphens. The type of source revision, based on the source provider. For example, the revision\n type for the CodeCommit action provider is the commit ID. The description of the action configuration property that is displayed to\n users. The source revision, or version of your source artifact, with the changes that you\n want to run in the pipeline execution. The type of the configuration property. A list that allows you to specify, or override, the source revision for a pipeline\n execution that's being started. A source revision is the version with all the changes to\n your application code, or source artifact, for the pipeline execution. For the Represents information about a rule configuration property. The name of the stage. Represents information about a stage to a job worker. The name of the stage. The name of the rule that is created for the condition, such as CheckAllResults. Reserved for future use. The ID for the rule type, which is made up of the combined values for category, owner, provider, and version. The actions included in a stage. The action configuration fields for the rule. The method to use when a stage has not completed successfully. For example,\n configuring this field for rollback will roll back a failed stage automatically to the\n last successful pipeline execution in the stage. The input artifacts fields for the rule, such as specifying an input file for the rule. Represents information about a stage and its definition. The ID of the pipeline execution associated with the stage. The pipeline role ARN associated with the rule. The status of the stage, or for a completed stage, the last status of the\n stage. 
A status of cancelled means that the pipeline’s definition was updated before the\n stage execution could be completed. The Region for the condition associated with the rule. The type of pipeline execution for the stage, such as a rollback pipeline\n execution. The action timeout for the rule. Represents information about the run of a stage. Represents information about the rule to be created for an associated condition. An example would be creating a new rule for an entry condition, such as a rule that checks for a test result before allowing the run to enter the deployment stage. The execution ID for the run of the rule. The status of the run of the rule, such as FAILED. A summary of the run of the rule. The last status change of the rule. The system-generated token used to identify a unique request. The ARN of the user who last changed the rule. The external ID of the run of the rule. The URL of a resource external to Amazon Web Services that is used when running the\n rule (for example, an external repository URL). Represents information about each time a rule is run as part of the pipeline execution for a pipeline configured with conditions. The ID of the pipeline execution in the stage where the rule was run. Use the GetPipelineState action to retrieve the current pipelineExecutionId of\n the stage. The ID of the run for the rule. The version number of the pipeline with the stage where the rule was run. The name of the stage where the rule was run. The name of the rule that was run in the stage. The start time of the rule execution. The date and time of the last change to the rule execution, in timestamp\n format. The ARN of the user who changed the rule execution details. The status of the rule execution. Status categories are Input details for the rule execution, such as role ARN, Region, and input\n artifacts. Output details for the rule execution, such as the rule execution result. 
The details of the runs for a rule and the results produced on an artifact as it passes\n through stages in the pipeline. The pipeline execution ID used to filter rule execution history. Filter values for the rule execution. The ID for the rule type, which is made up of the combined values for category, owner, provider, and version. Configuration data for a rule execution, such as the resolved values for that run. Configuration data for a rule execution with all variable references replaced with\n their real values for the execution. The ARN of the IAM service role that performs the declared rule. This is assumed\n through the roleArn for the pipeline. The Amazon Web Services Region for the rule, such as us-east-1. Details of input artifacts of the rule that correspond to the rule \n execution. Input information used for a rule execution. Execution result information listed in the output details for a rule\n execution. Output details listed for a rule execution, such as the rule execution\n result. The external ID for the rule execution. The external provider summary for the rule execution. The deepest external link to the external resource (for example, a repository URL or\n deployment endpoint) that is used when running the rule. Execution result information, such as the external execution ID. The system-generated unique ID that identifies the revision number of the\n rule. The unique identifier of the change that set the state to this revision (for\n example, a deployment ID or timestamp). The date and time when the most recent version of the rule was created, in\n timestamp format. The change to a rule that creates a revision of the rule. The name of the rule. The ID of the current revision of the artifact successfully worked on by the\n job. Represents information about the latest run of a rule. A URL link for more information about the state of the action, such as a details page. 
A URL link for more information about the revision, such as a commit details\n page. Returns information about the state of a rule. Values returned in the Represents information about a rule type. Returns information about the settings for a rule type. The configuration properties for the rule type. The rule type, which is made up of the combined values for category, owner, provider, and version. A category defines what kind of rule can be run in the stage, and constrains\n the provider type for the rule. Valid categories are limited to one of the following\n values. INVOKE Approval Rule The creator of the rule being called. The valid value for the\n The provider of the service being called by the rule. Valid providers are\n determined by the rule category. For example, a managed rule in the Rule category type\n has an owner of AWS, which would be specified as\n A string that describes the rule version. The ID for the rule type, which is made up of the combined values for category, owner, provider, and version. The URL of a sign-up page where users can sign up for an external service and\n perform initial configuration of the action provided by that service. The URL returned to the CodePipeline console that provides a deep link to the\n resources of the external system, such as the configuration page for a CodeDeploy\n deployment group. This link is provided as part of the action display in the\n pipeline. The URL returned to the CodePipeline console that contains a link to the\n top-level landing page for the external system, such as the console page for CodeDeploy.\n This link is shown on the pipeline view page in the CodePipeline console and\n provides a link to the execution entity of the external action. The URL returned to the CodePipeline console that contains a link to the page\n where customers can update or change the configuration of the external action. Returns information about the settings for a rule type. The name of the S3 bucket. 
The key of the object in the S3 bucket, which uniquely identifies the object in the\n bucket. The location of the S3 bucket that contains a revision. The Amazon S3 artifact bucket for an action's artifacts. The artifact name. The Amazon S3 artifact location for an action's artifacts. The name of the action that processed the revision to the source\n artifact. The system-generated unique ID that identifies the revision number of the\n artifact. Summary information about the most recent revision of the artifact. For GitHub and\n CodeCommit repositories, the commit message. For Amazon S3 buckets or actions,\n the user-provided content of a The commit ID for the artifact revision. For artifacts stored in GitHub or\n CodeCommit repositories, the commit ID is linked to a commit details page. Information about the version (or revision) of a source artifact that initiated a\n pipeline execution. The name of the action where the override will be applied. The type of source revision, based on the source provider. For example, the revision\n type for the CodeCommit action provider is the commit ID. The source revision, or version of your source artifact, with the changes that you\n want to run in the pipeline execution. A list that allows you to specify, or override, the source revision for a pipeline\n execution that's being started. A source revision is the version with all the changes to\n your application code, or source artifact, for the pipeline execution. For the Represents information about the latest run of a condition for a stage. The states of the conditions for a run of a condition for a stage. The state of a run of a condition for a stage. The status of a run of a condition for a stage. A summary of the run of the condition for a stage. Represents information about the run of a condition for a stage. The name of the stage. Represents information about a stage to a job worker. The name of the stage. Reserved for future use. The actions included in a stage. 
The method to use when a stage has not completed successfully. For example,\n configuring this field for rollback will roll back a failed stage automatically to the\n last successful pipeline execution in the stage. The method to use when a stage has succeeded. For example,\n configuring this field for conditions will allow the stage to succeed when the conditions are met. The method to use when a stage allows entry. For example, configuring this field for conditions will allow entry to the stage when the conditions are met. Represents information about a stage and its definition. The ID of the pipeline execution associated with the stage. The status of the stage, or for a completed stage, the last status of the\n stage. A status of cancelled means that the pipeline’s definition was updated before the\n stage execution could be completed. The type of pipeline execution for the stage, such as a rollback pipeline\n execution. Represents information about the run of a stage. Information about the latest execution in the stage, including its ID and\n status. The state of the entry conditions for a stage. The state of the success conditions for a stage. The state of the failure conditions for a stage. Filter for pipeline executions that have successfully completed the stage in the\n current pipeline version. The conditions that are success conditions. The conditions for making checks that, if met, succeed a stage. With the Amazon Cognito user pools API, you can configure user pools and authenticate users. To\n authenticate users from third-party identity providers (IdPs) in this API, you can\n link IdP users to native user profiles. Learn more\n about the authentication and authorization of federated users at Adding user pool sign-in through a third party and in the User pool federation endpoints and hosted UI reference. This API reference provides detailed information about API operations and object types\n in Amazon Cognito. 
Along with resource management operations, the Amazon Cognito user pools API includes classes\n of operations and authorization models for client-side and server-side authentication of\n users. You can interact with operations in the Amazon Cognito user pools API as any of the\n following subjects. An administrator who wants to configure user pools, app clients, users,\n groups, or other user pool functions. A server-side app, like a web application, that wants to use its Amazon Web Services\n privileges to manage, authenticate, or authorize a user. A client-side app, like a mobile app, that wants to make unauthenticated\n requests to manage, authenticate, or authorize a user. For more information, see Using the Amazon Cognito user pools API and user pool endpoints\n in the Amazon Cognito Developer Guide. With your Amazon Web Services SDK, you can build the logic to support operational flows in every use\n case for this API. You can also make direct REST API requests to Amazon Cognito user pools service endpoints. The following links can get you started\n with the To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services\n SDKs. With the Amazon Cognito user pools API, you can configure user pools and authenticate users. To\n authenticate users from third-party identity providers (IdPs) in this API, you can\n link IdP users to native user profiles. Learn more\n about the authentication and authorization of federated users at Adding user pool sign-in through a third party and in the User pool federation endpoints and hosted UI reference. This API reference provides detailed information about API operations and object types\n in Amazon Cognito. 
Along with resource management operations, the Amazon Cognito user pools API includes classes\n of operations and authorization models for client-side and server-side authentication of\n users. You can interact with operations in the Amazon Cognito user pools API as any of the\n following subjects. An administrator who wants to configure user pools, app clients, users,\n groups, or other user pool functions. A server-side app, like a web application, that wants to use its Amazon Web Services\n privileges to manage, authenticate, or authorize a user. A client-side app, like a mobile app, that wants to make unauthenticated\n requests to manage, authenticate, or authorize a user. For more information, see Using the Amazon Cognito user pools API and user pool endpoints\n in the Amazon Cognito Developer Guide. With your Amazon Web Services SDK, you can build the logic to support operational flows in every use\n case for this API. You can also make direct REST API requests to Amazon Cognito user pools service endpoints. The following links can get you started\n with the To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services\n SDKs. This IAM-authenticated API operation provides a code that Amazon Cognito sent to your user\n when they signed up in your user pool. After your user enters their code, they confirm\n ownership of the email address or phone number that they provided, and their user\n account becomes active. Depending on your user pool configuration, your users will\n receive their confirmation code in an email or SMS message. Local users who signed up in your user pool are the only type of user who can confirm\n sign-up with a code. Users who federate through an external identity provider (IdP) have\n already been confirmed by their IdP. 
Administrator-created users confirm their accounts\n when they respond to their invitation email message and choose a password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n This IAM-authenticated API operation confirms user sign-up as an administrator.\n Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation.\n No confirmation code is required. This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can\n configure your user pool to not send confirmation codes to new users and instead confirm\n them with this API operation on the back end. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n Creates a new user in the specified user pool. If This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. 
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. This message is based on a template that you configured in your call to create or\n update a user pool. This template includes your custom sign-up instructions and\n placeholders for user name and temporary password. Alternatively, you can call In either case, the user will be in the Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n Creates a new user in the specified user pool. If This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. 
After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. This message is based on a template that you configured in your call to create or\n update a user pool. This template includes your custom sign-up instructions and\n placeholders for user name and temporary password. Alternatively, you can call In either case, the user will be in the Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n Initiates the authentication flow, as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n Initiates the authentication flow, as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n Resets the specified user's password in a user pool as an administrator. Works on any\n user. To use this API operation, your user pool must have self-service account recovery\n configured. Use AdminSetUserPassword if you manage passwords as an administrator. 
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Deactivates a user's password, requiring them to change it. If a user tries to sign in\n after the API is called, Amazon Cognito responds with a\n Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n Resets the specified user's password in a user pool as an administrator. Works on any\n user. To use this API operation, your user pool must have self-service account recovery\n configured. Use AdminSetUserPassword if you manage passwords as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. 
If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Deactivates a user's password, requiring them to change it. If a user tries to sign in\n after the API is called, Amazon Cognito responds with a\n Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n Some API operations in a user pool generate a challenge, like a prompt for an MFA\n code, for device authentication that bypasses MFA, or for a custom authentication\n challenge. An For more information about custom authentication challenges, see Custom\n authentication challenge Lambda triggers. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. 
Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n Some API operations in a user pool generate a challenge, like a prompt for an MFA\n code, for device authentication that bypasses MFA, or for a custom authentication\n challenge. An For more information about custom authentication challenges, see Custom\n authentication challenge Lambda triggers. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. 
In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Updates the specified user's attributes, including developer attributes, as an\n administrator. Works on any user. To delete an attribute from your user, submit the\n attribute in your API request with a blank value. 
For custom attributes, you must prepend the In addition to updating user attributes, this API can also be used to mark phone and\n email as verified. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Updates the specified user's attributes, including developer attributes, as an\n administrator. Works on any user. To delete an attribute from your user, submit the\n attribute in your API request with a blank value. For custom attributes, you must prepend the In addition to updating user attributes, this API can also be used to mark phone and\n email as verified. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n The operating mode of advanced security features in custom authentication with \n \n Custom authentication challenge Lambda triggers.\n Advanced security configuration options for additional authentication types in your\n user pool, including custom\n authentication. Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA)\n for a user, with a unique private key that Amazon Cognito generates and returns in the API\n response. You can authorize an Amazon Cognito disassociates an existing software token when you verify the new token in a\n VerifySoftwareToken API request. If you don't verify the software\n token and your user pool doesn't require MFA, the user can then authenticate with\n user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito\n generates an After you set up software token MFA for your user, Amazon Cognito generates a\n Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA)\n for a user, with a unique private key that Amazon Cognito generates and returns in the API\n response. You can authorize an Amazon Cognito disassociates an existing software token when you verify the new token in a\n VerifySoftwareToken API request. 
If you don't verify the software\n token and your user pool doesn't require MFA, the user can then authenticate with\n user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito\n generates an After you set up software token MFA for your user, Amazon Cognito generates a\n Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. The CloudWatch logging destination of a user pool detailed activity logging\n configuration. Configuration for the CloudWatch log group destination of user pool detailed activity\n logging, or of user activity log export with advanced security features. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. 
Creates a new Amazon Cognito user pool and sets the password policy for the\n pool. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. \n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Creates a new Amazon Cognito user pool and sets the password policy for the\n pool. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy. 
\n Learn more\n \n Using the Amazon Cognito user pools API and user pool endpoints\n Errors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to Valid values include: \n \n Errors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to Valid values include: \n \n Defaults to The ARN of an Amazon Data Firehose stream that's the destination for advanced security\n features log export. Configuration for the Amazon Data Firehose stream destination of user activity log export with\n advanced security features. Calling this API causes a message to be sent to the end user with a confirmation code\n that is required to change the user's password. For the If neither a verified phone number nor a verified email exists, this API returns\n To use this API operation, your user pool must have self-service account recovery\n configured. Use AdminSetUserPassword if you manage passwords as an administrator. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. 
Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Calling this API causes a message to be sent to the end user with a confirmation code\n that is required to change the user's password. For the If neither a verified phone number nor a verified email exists, this API returns\n To use this API operation, your user pool must have self-service account recovery\n configured. Use AdminSetUserPassword if you manage passwords as an administrator. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. 
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Gets the detailed activity logging configuration for a user pool. Gets the logging configuration of a user pool. The ID of the user pool where you want to view detailed activity logging\n configuration. The ID of the user pool that has the logging configuration that you want to\n view. The detailed activity logging configuration of the requested user pool. The logging configuration of the requested user pool. Generates a user attribute verification code for the specified attribute name. Sends a\n message to a user with a code that they must return in a VerifyUserAttribute\n request. Authorize this action with a signed-in user's access token. It must include the scope Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. 
Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Generates a user attribute verification code for the specified attribute name. Sends a\n message to a user with a code that they must return in a VerifyUserAttribute\n request. Authorize this action with a signed-in user's access token. It must include the scope Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. 
In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user\n with a federated IdP with Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Initiates sign-in for a user in the Amazon Cognito user directory. 
You can't sign in a user\n with a federated IdP with Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. This exception is thrown when the trust relationship is not valid for the role\n provided for SMS configuration. This can happen if you don't trust\n This exception is thrown when the trust relationship is not valid for the role\n provided for SMS configuration. This can happen if you don't trust\n The The The source of events that your user pool sends for detailed activity logging. The source of events that your user pool sends for logging. To send error-level logs\n about user notification activity, set to The CloudWatch logging destination of a user pool. 
The CloudWatch log group destination of user pool detailed activity logs, or of user\n activity log export with advanced security features. The Amazon S3 bucket destination of user activity log export with advanced security\n features. To activate this setting, \n advanced security features must be active in your user pool. The Amazon Data Firehose stream destination of user activity log export with advanced security\n features. To activate this setting, \n advanced security features must be active in your user pool. The ID of the user pool where you configured detailed activity logging. The ID of the user pool where you configured logging. The detailed activity logging destination of a user pool. A logging destination of a user pool. User pools can have multiple logging\n destinations for message-delivery and user-activity logs. The logging parameters of a user pool. The logging parameters of a user pool returned in response to\n The message returned when a user's new password matches a previous password and \n doesn't comply with the password-history policy. In the password policy that you have set, refers to whether you have required users to\n use at least one symbol in their password. The number of previous passwords that you want Amazon Cognito to restrict each user from\n reusing. Users can't set a password that matches any of Password history isn't enforced and isn't displayed in DescribeUserPool responses when you set this value to\n Resends the confirmation (for confirmation of registration) to a specific user in the\n user pool. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. 
Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Resends the confirmation (for confirmation of registration) to a specific user in the\n user pool. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. 
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Some API operations in a user pool generate a challenge, like a prompt for an MFA\n code, for device authentication that bypasses MFA, or for a custom authentication\n challenge. A For more information about custom authentication challenges, see Custom\n authentication challenge Lambda triggers. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. 
After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide. Some API operations in a user pool generate a challenge, like a prompt for an MFA\n code, for device authentication that bypasses MFA, or for a custom authentication\n challenge. A For more information about custom authentication challenges, see Custom\n authentication challenge Lambda triggers. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.TOKEN
\n for a GitHub repository, SIGV4
for an Amazon Web Services CodeCommit\n repository, and SSH
for GitLab and Bitbucket repositories.type
, Amplify uses the default\n AMPLIFY_MANAGED
setting.AMPLIFY_MANAGED
cache configuration automatically applies an\n optimized cache configuration for your app based on its platform, routing rules, and\n rewrite rules. This is the default setting.AMPLIFY_MANAGED_NO_COOKIES
cache configuration type is the same as AMPLIFY_MANAGED
, except that it excludes all cookies from the cache key.AMPLIFY_MANAGED
to use the default certificate that Amplify\n provisions for you.CUSTOM
to use your own certificate that you have already added to\n Certificate Manager in your Amazon Web Services account. Make sure you request (or\n import) the certificate in the US East (N. Virginia) Region (us-east-1). For more\n information about using ACM, see Importing certificates into\n Certificate Manager in the ACM User\n guide .AMPLIFY_MANAGED
to use the default certificate that Amplify\n provisions for you.CUSTOM
to use your own certificate that you have already added to\n Certificate Manager in your Amazon Web Services account. Make sure you request (or\n import) the certificate in the US East (N. Virginia) Region (us-east-1). For more\n information about using ACM, see Importing certificates into\n Certificate Manager in the ACM User\n guide.accessToken
for GitHub repositories only. To authorize access to a\n repository provider such as Bitbucket or CodeCommit, use oauthToken
.accessToken
or oauthToken
when you\n update an app.\n
\n \n
",
"smithy.api#title": "Amazon AppIntegrations Service",
"smithy.rules#endpointRuleSet": {
"version": "1.0",
@@ -1092,7 +1098,7 @@
}
],
"traits": {
- "smithy.api#documentation": "\n
",
+ "smithy.api#documentation": "\n
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
.1698778057
.1698778057
.1698778057
.\n
"
+ }
}
},
"traits": {
@@ -1056,7 +1062,7 @@
"StartTime": {
"target": "smithy.api#Timestamp",
"traits": {
- "smithy.api#documentation": "\"Type\": \"AWS::Resource\"
\n \"ResourceType\": \"AWS::Logs::LogGroup\"
\n \"Identifier\": \"name-of-log-group\"
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n 1698778057
\n \n
"
+ }
}
},
"traits": {
diff --git a/models/appstream.json b/models/appstream.json
index 210a178f1c..3d58a636ab 100644
--- a/models/appstream.json
+++ b/models/appstream.json
@@ -2036,7 +2036,7 @@
"target": "com.amazonaws.appstream#String",
"traits": {
"smithy.api#clientOptional": {},
- "smithy.api#documentation": "\"Type\": \"AWS::Resource\"
\n \"ResourceType\": \"AWS::Logs::LogGroup\"
\n \"Identifier\": \"name-of-log-group\"
\n \n
\n \n
",
+ "smithy.api#documentation": "\n
\n \n
",
"smithy.api#required": {}
}
},
@@ -2067,7 +2067,7 @@
"DisconnectTimeoutInSeconds": {
"target": "com.amazonaws.appstream#Integer",
"traits": {
- "smithy.api#documentation": "DisconnectTimeoutInSeconds
time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n they try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds
elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds
elapses, they are\n disconnected.DisconnectTimeoutInSeconds
time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n they try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds
elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds
elapses, they are\n disconnected.DisconnectTimeoutInSeconds
time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n users try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds
elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds
elapses, they are\n disconnected.DisconnectTimeoutInSeconds
time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n users try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds
elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds
elapses, they are\n disconnected.\n
"
+ }
+ },
+ "DynamicAppProvidersEnabled": {
+ "target": "com.amazonaws.appstream#DynamicAppProvidersEnabled",
+ "traits": {
+ "smithy.api#documentation": "DisconnectTimeoutInSeconds
time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n users try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds
elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds
elapses, they are\n disconnected. DisconnectTimeoutInSeconds
time\n interval begins. Users are notified before they are disconnected due to inactivity. If\n users try to reconnect to the streaming session before the time interval specified in\n DisconnectTimeoutInSeconds
elapses, they are connected to their\n previous session. Users are considered idle when they stop providing keyboard or mouse\n input during their streaming session. File uploads and downloads, audio in, audio out,\n and pixels changing do not qualify as user activity. If users continue to be idle after\n the time interval in IdleDisconnectTimeoutInSeconds
elapses, they are\n disconnected. CLIPBOARD_COPY_FROM_LOCAL_DEVICE
and CLIPBOARD_COPY_TO_LOCAL_DEVICE
actions.ENABLED
. This can't be specified when the permission is DISABLED
. CLIPBOARD_COPY_FROM_LOCAL_DEVICE
and CLIPBOARD_COPY_TO_LOCAL_DEVICE
actions.ENABLED
. This can't be specified when the permission is DISABLED
. VPCZoneIdentifier
with\n AvailabilityZones
, the subnets that you specify must reside in those\n Availability Zones.VPCZoneIdentifier
with AvailabilityZones
, the subnets that\n you specify must reside in those Availability Zones.SUBMITTED
\n or\n PENDING
\n are\n canceled. A job\n inRUNNABLE
remains in RUNNABLE
until it reaches the head of the\n job queue. Then the job status is updated to\n FAILED
.PENDING
job is canceled after all dependency jobs are completed.\n Therefore, it may take longer than expected to cancel a job in PENDING
\n status.PENDING
, Batch attempts to\n cancel all child jobs. The array parent job is canceled when all child jobs are\n completed.STARTING
or\n RUNNING
state aren't canceled. However, the API operation still succeeds, even\n if no job is canceled. These jobs must be terminated with the TerminateJob\n operation.SUBMITTED
, PENDING
, or RUNNABLE
state are cancelled and the job status is updated to FAILED
.PENDING
job is canceled after all dependency jobs are completed.\n Therefore, it may take longer than expected to cancel a job in PENDING
\n status.PENDING
, Batch attempts to\n cancel all child jobs. The array parent job is canceled when all child jobs are\n completed.STARTING
or\n RUNNING
state aren't canceled. However, the API operation still succeeds, even\n if no job is canceled. These jobs must be terminated with the TerminateJob\n operation.\n
"
+ "smithy.api#documentation": "\n
"
}
},
"com.amazonaws.bedrockagentruntime#FlowInputContent": {
@@ -1725,7 +1725,7 @@
"document": {
"target": "smithy.api#Document",
"traits": {
- "smithy.api#documentation": "\n
"
+ "smithy.api#documentation": "\n
"
}
},
"com.amazonaws.bedrockagentruntime#FlowOutputEvent": {
@@ -1766,27 +1766,27 @@
"nodeName": {
"target": "com.amazonaws.bedrockagentruntime#NodeName",
"traits": {
- "smithy.api#documentation": "\n
",
+ "smithy.api#documentation": "\n
",
"smithy.api#sensitive": {}
}
},
@@ -2938,7 +2938,7 @@
"topP": {
"target": "com.amazonaws.bedrockagentruntime#TopP",
"traits": {
- "smithy.api#documentation": "Top P
determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topP
to 80, the model only selects the next token from the top 80% of the probability distribution of next tokens.Top P
determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topP
to 0.8, the model only selects the next token from the top 80% of the probability distribution of next tokens.InvokeFlow
.\n
"
}
+ },
+ "modelInvocationOutput": {
+ "target": "com.amazonaws.bedrockagentruntime#OrchestrationModelInvocationOutput",
+ "traits": {
+ "smithy.api#documentation": "type
is ORCHESTRATION
.text
contains the prompt.inferenceConfiguration
, parserMode
, and overrideLambda
values are set in the PromptOverrideConfiguration object that was set when the agent was created or updated.Converse
provides\n a consistent interface that works with all models that\n support messages. This allows you to write code once and use it with different models.\n Should a model have unique inference parameters, you can also pass those unique parameters\n to the model.bedrock:InvokeModel
action. Converse
provides\n a consistent interface that works with all models that\n support messages. This allows you to write code once and use it with different models.\n If a model has unique inference parameters, you can also pass those unique parameters\n to the model.bedrock:InvokeModel
action. ConverseStream
provides a consistent API\n that works with all Amazon Bedrock models that support messages.\n This allows you to write code once and use it with different models. Should a\n model have unique inference parameters, you can also pass those unique parameters to the\n model. responseStreamingSupported
field in the response.bedrock:InvokeModelWithResponseStream
action.ConverseStream
provides a consistent API\n that works with all Amazon Bedrock models that support messages.\n This allows you to write code once and use it with different models. Should a\n model have unique inference parameters, you can also pass those unique parameters to the\n model. responseStreamingSupported
field in the response.ConverseStream
.bedrock:InvokeModelWithResponseStream
action.responseStreamingSupported
field in the response.InvokeModelWithResponseStream
.bedrock:InvokeModelWithResponseStream
action. responseStreamingSupported
field in the response.InvokeModelWithResponseStream
.bedrock:InvokeModelWithResponseStream
action. GetModelCustomizationJob
operation to retrieve the job status.GetModelCustomizationJob
operation to retrieve the job status.ratingMethod
, and an optional description of the metric.ratingMethod
, and an optional description of the metric.CreateModelCustomizationJob
operation.CreateModelCustomizationJob
operation.nextToken
field when making another request to return the next batch of results.maxResults
value provided in the request, enter the token returned in the nextToken
field in the response in this field to return the next batch of results.true
) or if they were shared with the current account (false
).maxResults
value provided in the request, use this token when making another request in the nextToken
field to return the next batch of results.nextToken
field when making another request to return the next batch of results.maxResults
value provided in the request, enter the token returned in the nextToken
field in the response in this field to return the next batch of results.maxResults
value provided in the request, use this token when making another request in the nextToken
field to return the next batch of results.nextToken
field when making another request to return the next batch of results.maxResults
value provided in the request, enter the token returned in the nextToken
field in the response in this field to return the next batch of results.maxResults
value provided in the request, use this token when making another request in the nextToken
field to return the next batch of results.\n
"
+ }
+ },
+ "com.amazonaws.bedrock#ModelCopyResource": {
+ "type": "resource",
+ "operations": [
+ {
+ "target": "com.amazonaws.bedrock#CreateModelCopyJob"
+ },
+ {
+ "target": "com.amazonaws.bedrock#GetModelCopyJob"
+ },
+ {
+ "target": "com.amazonaws.bedrock#ListModelCopyJobs"
+ }
+ ]
+ },
"com.amazonaws.bedrock#ModelCustomization": {
"type": "enum",
"members": {
@@ -6531,7 +7043,7 @@
}
],
"traits": {
- "smithy.api#documentation": "additionalAnalyses
\n parameter is\n currently\n supported for the list analysis rule (AnalysisRuleList
) and the custom\n analysis rule (AnalysisRuleCustom
).ConfiguredTableAssociationAnalysisRule
is referred to as the\n collaboration analysis rule.allowedAdditionalAnalyses
\n parameter\n is currently supported for the list analysis rule\n (AnalysisRuleList
) and the custom analysis rule\n (AnalysisRuleCustom
).ConfiguredTableAssociationAnalysisRule
is referred to as the\n collaboration analysis rule.TRUE
)\n or are to cryptographically process every column\n (FALSE
).TRUE
) or are\n to cryptographically process every column (FALSE
).TRUE
)\n or are to contain only non-repeated values\n (FALSE
).TRUE
)\n or are to contain only non-repeated values (FALSE
).TRUE
)\n or can only be joined on Fingerprint columns of the same name\n (FALSE
).TRUE
) or can only be joined on Fingerprint columns of the\n same name (FALSE
).TRUE
)\n or cryptographically processed\n (FALSE
).TRUE
) or cryptographically processed (FALSE
).ConfiguredTableAssociationAnalysisRule
is referred to as the\n collaboration analysis rule.TRUE
) or not (FALSE
).FALSE
.TRUE
, Clean Rooms manages permissions for the ID mapping table resource. FALSE
, the resource owner manages permissions for the ID mapping table resource.TRUE
, Clean Rooms manages permissions for the ID namespace association resource.FALSE
, the resource owner manages permissions for the ID namespace association resource.TRUE
) or\n has not accepted to pay for query compute costs\n (FALSE
).\n
",
+ "smithy.api#documentation": "FALSE
but you are responsible to pay for\n query compute costs. TRUE
but you are not responsible to pay for\n query compute costs. TRUE
) or has not accepted to pay for query compute costs\n (FALSE
).\n
",
"smithy.api#required": {}
}
}
},
"traits": {
- "smithy.api#documentation": "FALSE
but you are responsible to pay for\n query compute costs. TRUE
but you are not responsible to pay for\n query compute costs. s3
\n output type.member
output type.TRUE
) or has not configured the collaboration member to pay for query\n compute costs (FALSE
).TRUE
value for more than one\n member in the collaboration. FALSE
value for the member who can\n query.TRUE
) or has not configured the collaboration\n member to pay for query compute costs (FALSE
).TRUE
value for more than one\n member in the collaboration. FALSE
value for the member who can\n query.DIRECT_ANALYSIS
) or used as input into additional analyses (ADDITIONAL_ANALYSIS
), such as a query that is a seed for a lookalike ML model.analysisType
of DIRECT_ANALYSIS
, ADDITIONAL_ANALYSIS_FOR_AUDIENCE_GENERATION
, or both.ConfiguredTableAssociationAnalysisRule
is referred to as the\n collaboration analysis rule.{\"user_id\": \"111111\"}
\n {\"user_id\": \"222222\"}
\n ...
\n {\"user_id\": \"111111\"}
\n {\"user_id\": \"222222\"}
\n ...
\n audienceSize
selected from this list. You can use the ABSOLUTE
\n AudienceSize to configure out audience sizes using the count of identifiers in the output. You can use the Percentage
\n AudienceSize to configure sizes in the range 1-100 percent.audienceSize
values when used in the StartAudienceExportJob for an audience generation job. You can use the ABSOLUTE
\n AudienceSize to configure out audience sizes using the count of identifiers in the output. You can use the Percentage
\n AudienceSize to configure sizes in the range 1-100 percent.authType
CODECONNECTIONS, \n this is the connectionArn
.authType
CODECONNECTIONS, \n this is the connectionArn
. For the authType
SECRETS_MANAGER, this is the secretArn
.aws/codecommit
Amazon Web Services managed key is used.aws/codecommit
Amazon Web Services managed key is used.ListRuleExecutions
\n call, which can be used to return the next set of rule executions in the\n list.ListRuleExecutions
call. To\n view all items in the list, continue to call this operation with each subsequent token\n until no more nextToken values are returned.codepipeline-artifact-revision-summary
key\n specified in the object metadata.S3_OBJECT_VERSION_ID
and S3_OBJECT_KEY
types of source revisions, either\n of the types can be used independently, or they can be used together to override the\n source with a specific ObjectKey and VersionID.InProgress
,\n Succeeded
, and Failed
.\nrevisionId
field indicate the rule revision information, such as the commit ID, for the current state.\n
",
+ "smithy.api#required": {}
+ }
+ },
+ "owner": {
+ "target": "com.amazonaws.codepipeline#RuleOwner",
+ "traits": {
+ "smithy.api#documentation": "Owner
field in the rule category is AWS
. AWS
.codepipeline-artifact-revision-summary
key\n specified in the object metadata.S3_OBJECT_VERSION_ID
and S3_OBJECT_KEY
types of source revisions, either\n of the types can be used independently, or they can be used together to override the\n source with a specific ObjectKey and VersionID.\n
\n CognitoIdentityProvider
client in other supported Amazon Web Services\n SDKs.\n
\n \n
\n CognitoIdentityProvider
client in other supported Amazon Web Services\n SDKs.\n
\n \n
\n \n
\n MessageAction
isn't set, the default is to send a welcome message via\n email or phone (SMS).AdminCreateUser
with SUPPRESS
\n for the MessageAction
parameter, and Amazon Cognito won't send any email. FORCE_CHANGE_PASSWORD
state until\n they sign in and change their password.\n
\n MessageAction
isn't set, the default is to send a welcome message via\n email or phone (SMS).AdminCreateUser
with SUPPRESS
\n for the MessageAction
parameter, and Amazon Cognito won't send any email. FORCE_CHANGE_PASSWORD
state until\n they sign in and change their password.\n
\n \n
\n \n
\n PasswordResetRequiredException
error. Your app must then perform the\n actions that reset your user's password: the forgot-password flow. In addition, if the\n user pool has phone verification selected and a verified phone number exists for the\n user, or if email verification is selected and a verified email exists for the user,\n calling this API will also result in sending a message to the end user with the code to\n change their password.\n
\n PasswordResetRequiredException
error. Your app must then perform the\n actions that reset your user's password: the forgot-password flow. In addition, if the\n user pool has phone verification selected and a verified phone number exists for the\n user, or if email verification is selected and a verified email exists for the user,\n calling this API will also result in sending a message to the end user with the code to\n change their password.\n
\n AdminRespondToAuthChallenge
API request provides the answer\n to that challenge, like a code or a secure remote password (SRP). The parameters of a\n response to an authentication challenge vary with the type of challenge.\n
\n AdminRespondToAuthChallenge
API request provides the answer\n to that challenge, like a code or a secure remote password (SRP). The parameters of a\n response to an authentication challenge vary with the type of challenge.\n
\n custom:
prefix to the\n attribute name.\n
\n custom:
prefix to the\n attribute name.\n
\n AssociateSoftwareToken
request with either\n the user's access token, or a session string from a challenge response that you received\n from Amazon Cognito.MFA_SETUP
or SOFTWARE_TOKEN_SETUP
challenge\n each time your user signs. Complete setup with AssociateSoftwareToken
\n and VerifySoftwareToken
.SOFTWARE_TOKEN_MFA
challenge when they authenticate. Respond to\n this challenge with your user's TOTP.AssociateSoftwareToken
request with either\n the user's access token, or a session string from a challenge response that you received\n from Amazon Cognito.MFA_SETUP
or SOFTWARE_TOKEN_SETUP
challenge\n each time your user signs in. Complete setup with\n AssociateSoftwareToken
and VerifySoftwareToken
.SOFTWARE_TOKEN_MFA
challenge when they authenticate. Respond to\n this challenge with your user's TOTP.\n
\n \n
\n ENABLED
and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY
, those APIs return a\n UserNotFoundException
exception if the user doesn't exist in the user\n pool.\n
"
+ "smithy.api#documentation": "ENABLED
- This prevents user existence-related errors.LEGACY
- This represents the early behavior of Amazon Cognito where user\n existence related errors aren't prevented.ENABLED
and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY
, those APIs return a\n UserNotFoundException
exception if the user doesn't exist in the user\n pool.\n
\n ENABLED
- This prevents user existence-related errors.LEGACY
- This represents the early behavior of Amazon Cognito where user\n existence related errors aren't prevented.LEGACY
when you don't provide a value.Username
parameter,\n you can use the username or user alias. The method used to send the confirmation code is\n sent according to the specified AccountRecoverySetting. For more information, see Recovering\n User Accounts in the Amazon Cognito Developer Guide. To\n use the confirmation code for resetting the password, call ConfirmForgotPassword. InvalidParameterException
. If your app client has a client secret and\n you don't provide a SECRET_HASH
parameter, this API returns\n NotAuthorizedException
.Username
parameter,\n you can use the username or user alias. The method used to send the confirmation code is\n sent according to the specified AccountRecoverySetting. For more information, see Recovering\n User Accounts in the Amazon Cognito Developer Guide. To\n use the confirmation code for resetting the password, call ConfirmForgotPassword. InvalidParameterException
. If your app client has a client secret and\n you don't provide a SECRET_HASH
parameter, this API returns\n NotAuthorizedException
.aws.cognito.signin.user.admin
.aws.cognito.signin.user.admin
.InitiateAuth
. For more information, see Adding user pool sign-in through a third party.InitiateAuth
. For more information, see Adding user pool sign-in through a third party.cognito-idp.amazonaws.com
or the external ID provided in the role does\n not match what is provided in the SMS configuration for the user pool.cognito-idp.amazonaws.com
or the external ID provided in the role does\n not match what is provided in the SMS configuration for the user pool.errorlevel
selection of logs that a user pool sends for detailed\n activity logging.errorlevel
selection of logs that a user pool sends for detailed\n activity logging. To send userNotification
activity with information about message delivery, choose ERROR
with\n CloudWatchLogsConfiguration
. To send userAuthEvents
\n activity with user logs from advanced security features, choose INFO
with\n one of CloudWatchLogsConfiguration
, FirehoseConfiguration
, or\n S3Configuration
.userNotification
. To send\n info-level logs about advanced security features user activity, set to\n userAuthEvents
.GetLogDeliveryConfiguration
.n
previous\n passwords, where n
is the value of PasswordHistorySize
.0
or don't provide it. To activate this setting, \n advanced security features must be active in your user pool.RespondToAuthChallenge
API request provides the answer to that\n challenge, like a code or a secure remote password (SRP). The parameters of a response\n to an authentication challenge vary with the type of challenge.RespondToAuthChallenge
API request provides the answer to that\n challenge, like a code or a secure remote password (SRP). The parameters of a response\n to an authentication challenge vary with the type of challenge.
Configuration for the Amazon S3 bucket destination of user activity log export with\n advanced security features.
" + } + }, "com.amazonaws.cognitoidentityprovider#SESConfigurationSet": { "type": "string", "traits": { @@ -12521,7 +12657,7 @@ } ], "traits": { - "smithy.api#documentation": "Sets up or modifies the detailed activity logging configuration of a user pool.
" + "smithy.api#documentation": "Sets up or modifies the logging configuration of a user pool. User pools can export\n user notification logs and advanced security features user activity logs.
" } }, "com.amazonaws.cognitoidentityprovider#SetLogDeliveryConfigurationRequest": { @@ -12530,14 +12666,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "The ID of the user pool where you want to configure detailed activity logging .
", + "smithy.api#documentation": "The ID of the user pool where you want to configure logging.
", "smithy.api#required": {} } }, "LogConfigurations": { "target": "com.amazonaws.cognitoidentityprovider#LogConfigurationListType", "traits": { - "smithy.api#documentation": "A collection of all of the detailed activity logging configurations for a user\n pool.
", + "smithy.api#documentation": "A collection of the logging configurations for a user pool.
", "smithy.api#required": {} } } @@ -12838,7 +12974,7 @@ } ], "traits": { - "smithy.api#documentation": "Sets the user pool multi-factor authentication (MFA) configuration.
\nThis action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.
\nIf you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.
\nSets the user pool multi-factor authentication (MFA) configuration.
\nThis action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.
\nIf you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.
\nRegisters the user in the specified user pool and creates a user name, password, and\n user attributes.
\nAmazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
\nThis action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.
\nIf you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.
\nRegisters the user in the specified user pool and creates a user name, password, and\n user attributes.
\nAmazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
\nThis action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.
\nIf you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.
\nThe UUID of the authenticated user. This isn't the same as\n username
.
The 128-bit ID of the authenticated user. This isn't the same as\n username
.
With this operation, your users can update one or more of their attributes with their\n own credentials. You authorize this API request with the user's access token. To delete\n an attribute from your user, submit the attribute in your API request with a blank\n value. Custom attribute values in this request must include the custom:
\n prefix.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
\nThis action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.
\nIf you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.
\nWith this operation, your users can update one or more of their attributes with their\n own credentials. You authorize this API request with the user's access token. To delete\n an attribute from your user, submit the attribute in your API request with a blank\n value. Custom attribute values in this request must include the custom:
\n prefix.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
\nThis action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.
\nIf you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.
\nThis action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.
\nIf you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.
\nUpdates the specified user pool with the specified attributes. You can get a list of\n the current user pool settings using DescribeUserPool.
\nIf you don't provide a value for an attribute, Amazon Cognito sets it to its default value.
\nAmazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.
\n\n Learn more\n
\n\n Using the Amazon Cognito user pools API and user pool endpoints\n
\nThis action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.
\nIf you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.
\nUpdates the specified user pool with the specified attributes. You can get a list of\n the current user pool settings using DescribeUserPool.
\nIf you don't provide a value for an attribute, Amazon Cognito sets it to its default value.
\nAmazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.
\n\n Learn more\n
\n\n Using the Amazon Cognito user pools API and user pool endpoints\n
\nErrors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to ENABLED
and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY
, those APIs return a\n UserNotFoundException
exception if the user doesn't exist in the user\n pool.
Valid values include:
\n\n ENABLED
- This prevents user existence-related errors.
\n LEGACY
- This represents the early behavior of Amazon Cognito where user\n existence related errors aren't prevented.
Errors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to ENABLED
and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY
, those APIs return a\n UserNotFoundException
exception if the user doesn't exist in the user\n pool.
Valid values include:
\n\n ENABLED
- This prevents user existence-related errors.
\n LEGACY
- This represents the early behavior of Amazon Cognito where user\n existence-related errors aren't prevented.
Defaults to LEGACY
when you don't provide a value.
The operating mode of advanced security features in your user pool.
", + "smithy.api#documentation": "The operating mode of advanced security features for standard authentication types\n in your user pool, including username-password and secure remote password (SRP)\n authentication.\n
", "smithy.api#required": {} } + }, + "AdvancedSecurityAdditionalFlows": { + "target": "com.amazonaws.cognitoidentityprovider#AdvancedSecurityAdditionalFlowsType", + "traits": { + "smithy.api#documentation": "Advanced security configuration options for additional authentication types in your\n user pool, including custom\n authentication.
" + } } }, "traits": { @@ -15297,7 +15439,7 @@ "PreventUserExistenceErrors": { "target": "com.amazonaws.cognitoidentityprovider#PreventUserExistenceErrorTypes", "traits": { - "smithy.api#documentation": "Errors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to ENABLED
and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY
, those APIs return a\n UserNotFoundException
exception if the user doesn't exist in the user\n pool.
Valid values include:
\n\n ENABLED
- This prevents user existence-related errors.
\n LEGACY
- This represents the old behavior of Amazon Cognito where user\n existence related errors aren't prevented.
Errors and responses that you want Amazon Cognito APIs to return during authentication, account\n confirmation, and password recovery when the user doesn't exist in the user pool. When\n set to ENABLED
and the user doesn't exist, authentication returns an error\n indicating either the username or password was incorrect. Account confirmation and\n password recovery return a response indicating a code was sent to a simulated\n destination. When set to LEGACY
, those APIs return a\n UserNotFoundException
exception if the user doesn't exist in the user\n pool.
Valid values include:
\n\n ENABLED
- This prevents user existence-related errors.
\n LEGACY
- This represents the early behavior of Amazon Cognito where user\n existence-related errors aren't prevented.
Defaults to LEGACY
when you don't provide a value.
\n The value of the filter.\n
\nThe valid values for this parameter are as follows:
\nIf you specify the name
parameter as Finding
, specify\n Optimized
, NotOptimized
, or Unavailable
.
If you specify the name
parameter as FindingReasonCode
, specify\n CPUUnderprovisioned
, CPUOverprovisioned
, \n MemoryUnderprovisioned
, or MemoryOverprovisioned
.
\n The value of the filter.\n
\nThe valid values for this parameter are as follows:
\nIf you specify the name
parameter as Finding
, specify\n Optimized
, Underprovisioned
, or Overprovisioned
.
If you specify the name
parameter as FindingReasonCode
, specify\n CPUUnderprovisioned
, CPUOverprovisioned
, \n MemoryUnderprovisioned
, or MemoryOverprovisioned
.
\n The preference to control the number of days the utilization metrics of the Amazon Web Services resource are analyzed. \n When this preference isn't specified, we use the default value DAYS_14
.\n
You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types.
\n\n The preference to control the number of days the utilization metrics of the Amazon Web Services resource are analyzed. \n When this preference isn't specified, we use the default value DAYS_14
.\n
You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types.\n
\nAmazon EC2 instance lookback preferences can be set at the organization, account, and resource levels.
\nAuto Scaling group lookback preferences can only be set at the resource level.
\nContact Lens for Amazon Connect enables you to analyze conversations between customer and agents,\n by using speech transcription, natural language processing, and intelligent search\n capabilities. It performs sentiment analysis, detects issues, and enables you to automatically\n categorize contacts.
\nContact Lens for Amazon Connect provides both real-time and post-call analytics of customer-agent\n conversations. For more information, see Analyze conversations using\n Contact Lens in the Amazon Connect Administrator Guide.
", + "smithy.api#documentation": "\n Contact Lens actions\n
\nAmazon Connect Contact Lens enables you to analyze conversations between customer and agents, by using\n speech transcription, natural language processing, and intelligent search capabilities.\n It performs sentiment analysis, detects issues, and enables you to automatically\n categorize contacts.
\nAmazon Connect Contact Lens provides both real-time and post-call analytics of customer-agent\n conversations. For more information, see Analyze conversations\n using speech analytics in the Amazon Connect Administrator\n Guide.
", "smithy.api#title": "Amazon Connect Contact Lens", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -810,7 +810,7 @@ } }, "traits": { - "smithy.api#documentation": "Provides the category rules that are used to automatically categorize contacts based on\n uttered keywords and phrases.
" + "smithy.api#documentation": "Provides the category rules that are used to automatically categorize contacts based\n on uttered keywords and phrases.
" } }, "com.amazonaws.connectcontactlens#CategoryDetails": { @@ -868,7 +868,7 @@ } }, "traits": { - "smithy.api#documentation": "For characters that were detected as issues, where they occur in the transcript.
" + "smithy.api#documentation": "For characters that were detected as issues, where they occur in the\n transcript.
" } }, "com.amazonaws.connectcontactlens#ContactId": { @@ -930,7 +930,7 @@ } }, "traits": { - "smithy.api#documentation": "Potential issues that are detected based on an artificial intelligence analysis of each\n turn in the conversation.
" + "smithy.api#documentation": "Potential issues that are detected based on an artificial intelligence analysis of\n each turn in the conversation.
" } }, "com.amazonaws.connectcontactlens#IssuesDetected": { @@ -1006,7 +1006,7 @@ "MaxResults": { "target": "com.amazonaws.connectcontactlens#MaxResults", "traits": { - "smithy.api#documentation": "The maximimum number of results to return per page.
" + "smithy.api#documentation": "The maximum number of results to return per page.
" } }, "NextToken": { @@ -1034,7 +1034,7 @@ "NextToken": { "target": "com.amazonaws.connectcontactlens#NextToken", "traits": { - "smithy.api#documentation": "If there are additional results, this is the token for the next set of results. If response includes nextToken
there are two possible scenarios:
There are more segments so another call is required to get them.
\nThere are no more segments at this time, but more may be available later (real-time\n analysis is in progress) so the client should call the operation again to get new\n segments.
\nIf response does not include nextToken
, the analysis is completed (successfully or failed) and there are no more segments to retrieve.
If there are additional results, this is the token for the next set of results. If response includes nextToken
there are two possible\n scenarios:
There are more segments so another call is required to get them.
\nThere are no more segments at this time, but more may be available later\n (real-time analysis is in progress) so the client should call the operation\n again to get new segments.
\nIf response does not include nextToken
, the analysis is completed\n (successfully or failed) and there are no more segments to retrieve.
The content of the summary.
" + } + }, + "Status": { + "target": "com.amazonaws.connectcontactlens#PostContactSummaryStatus", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "Whether the summary was successfully COMPLETED or FAILED to be generated.
", + "smithy.api#required": {} + } + }, + "FailureCode": { + "target": "com.amazonaws.connectcontactlens#PostContactSummaryFailureCode", + "traits": { + "smithy.api#documentation": "If the summary failed to be generated, one of the following failure codes\n occurs:
\n\n QUOTA_EXCEEDED
: The number of concurrent analytics jobs reached\n your service quota.
\n INSUFFICIENT_CONVERSATION_CONTENT
: The conversation needs to have\n at least one turn from both the participants in order to generate the\n summary.
\n FAILED_SAFETY_GUIDELINES
: The generated summary cannot be\n provided because it failed to meet system safety guidelines.
\n INVALID_ANALYSIS_CONFIGURATION
: This code occurs when, for\n example, you're using a \n language \n that isn't supported by generative AI-powered post-contact summaries.\n
\n INTERNAL_ERROR
: Internal system error.
Information about the post-contact summary.
" + } + }, + "com.amazonaws.connectcontactlens#PostContactSummaryContent": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1762 + } + } + }, + "com.amazonaws.connectcontactlens#PostContactSummaryFailureCode": { + "type": "enum", + "members": { + "QUOTA_EXCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUOTA_EXCEEDED" + } + }, + "INSUFFICIENT_CONVERSATION_CONTENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INSUFFICIENT_CONVERSATION_CONTENT" + } + }, + "FAILED_SAFETY_GUIDELINES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED_SAFETY_GUIDELINES" + } + }, + "INVALID_ANALYSIS_CONFIGURATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_ANALYSIS_CONFIGURATION" + } + }, + "INTERNAL_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNAL_ERROR" + } + } + } + }, + "com.amazonaws.connectcontactlens#PostContactSummaryStatus": { + "type": "enum", + "members": { + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + } + } + }, "com.amazonaws.connectcontactlens#RealtimeContactAnalysisSegment": { "type": "structure", "members": { @@ -1169,6 +1258,12 @@ "traits": { "smithy.api#documentation": "The matched category rules.
" } + }, + "PostContactSummary": { + "target": "com.amazonaws.connectcontactlens#PostContactSummary", + "traits": { + "smithy.api#documentation": "Information about the post-contact summary.
" + } } }, "traits": { @@ -1255,7 +1350,7 @@ "target": "com.amazonaws.connectcontactlens#ParticipantId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The identifier of the participant.
", + "smithy.api#documentation": "The identifier of the participant. Valid values are CUSTOMER or AGENT.
", "smithy.api#required": {} } }, @@ -1295,7 +1390,7 @@ "target": "com.amazonaws.connectcontactlens#SentimentValue", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The sentiment of the detected for this piece of transcript.
", + "smithy.api#documentation": "The sentiment detected for this piece of transcript.
", "smithy.api#required": {} } }, diff --git a/models/connect.json b/models/connect.json index 3e909398a2..4ac501a5db 100644 --- a/models/connect.json +++ b/models/connect.json @@ -789,12 +789,12 @@ "AgentIds": { "target": "com.amazonaws.connect#AgentIds", "traits": { - "smithy.api#documentation": "An object to specify a list of agents, by Agent ID.
" + "smithy.api#documentation": "An object to specify a list of agents, by user ID.
" } } }, "traits": { - "smithy.api#documentation": "Can be used to define a list of preferred agents to target the contact within the queue.\n Note that agents must have the queue in their routing profile in order to be offered the\n contact.
" + "smithy.api#documentation": "Can be used to define a list of preferred agents to target the contact to within the queue.\u2028\n Note that agents must have the queue in their routing profile in order to be offered the\u2028\n contact.
" } }, "com.amazonaws.connect#AgentsMinOneMaxHundred": { @@ -829,7 +829,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 2 + "max": 4 } } }, @@ -7150,7 +7150,7 @@ } ], "traits": { - "smithy.api#documentation": "This API is in preview release for Amazon Connect and is subject to change.
\nInitiates an Amazon Connect instance with all the supported channels enabled. It does\n not attach any storage, such as Amazon Simple Storage Service (Amazon S3) or Amazon Kinesis. It\n also does not allow for any configurations on features, such as Contact Lens for Amazon Connect.
\nAmazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. \nIf you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. \nYou must wait 30 days before you can restart creating and deleting instances in your account.
", + "smithy.api#documentation": "This API is in preview release for Amazon Connect and is subject to change.
\nInitiates an Amazon Connect instance with all the supported channels enabled. It does\n not attach any storage, such as Amazon Simple Storage Service (Amazon S3) or Amazon Kinesis. It\n also does not allow for any configurations on features, such as Contact Lens for Amazon Connect.
\nFor more information, see Create an Amazon Connect\n instance in the Amazon Connect Administrator Guide.
\nAmazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. \nIf you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. \nYou must wait 30 days before you can restart creating and deleting instances in your account.
", "smithy.api#http": { "method": "PUT", "uri": "/instance", @@ -7556,7 +7556,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new predefined attribute for the specified Amazon Connect instance.
", + "smithy.api#documentation": "Creates a new predefined attribute for the specified Amazon Connect instance. Predefined\n attributes are attributes in an Amazon Connect instance that can be used to route\n contacts to an agent or pools of agents within a queue. For more information, see Create\n predefined attributes for routing contacts to agents.
", "smithy.api#http": { "method": "PUT", "uri": "/predefined-attributes/{InstanceId}", @@ -8190,7 +8190,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a security profile.
", + "smithy.api#documentation": "Creates a security profile.
\nFor information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.
", "smithy.api#http": { "method": "PUT", "uri": "/security-profiles/{InstanceId}", @@ -10088,7 +10088,7 @@ } ], "traits": { - "smithy.api#documentation": "This API is in preview release for Amazon Connect and is subject to change.
\nDeletes the Amazon Connect instance.
\nAmazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. \nIf you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. \nYou must wait 30 days before you can restart creating and deleting instances in your account.
", + "smithy.api#documentation": "This API is in preview release for Amazon Connect and is subject to change.
\nDeletes the Amazon Connect instance. For more information, see Delete your\n Amazon Connect instance in the Amazon Connect Administrator\n Guide.
\nAmazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. \nIf you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. \nYou must wait 30 days before you can restart creating and deleting instances in your account.
", "smithy.api#http": { "method": "DELETE", "uri": "/instance/{InstanceId}", @@ -10316,7 +10316,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a queue.
", + "smithy.api#documentation": "Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin website.
", "smithy.api#http": { "method": "DELETE", "uri": "/queues/{InstanceId}/{QueueId}", @@ -12027,7 +12027,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes a predefined attribute for the specified Amazon Connect instance.
", + "smithy.api#documentation": "Describes a predefined attribute for the specified Amazon Connect instance. Predefined\n attributes are attributes in an Amazon Connect instance that can be used to route\n contacts to an agent or pools of agents within a queue. For more information, see Create\n predefined attributes for routing contacts to agents.
", "smithy.api#http": { "method": "GET", "uri": "/predefined-attributes/{InstanceId}/{Name}", @@ -12460,7 +12460,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets basic information about the security profle.
", + "smithy.api#documentation": "Gets basic information about the security profile.
\nFor information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.
", "smithy.api#http": { "method": "GET", "uri": "/security-profiles/{InstanceId}/{SecurityProfileId}", @@ -16915,20 +16915,20 @@ "Filters": { "target": "com.amazonaws.connect#FiltersV2List", "traits": { - "smithy.api#documentation": "The filters to apply to returned metrics. You can filter on the following resources:
\nAgents
\nChannels
\nFeature
\nQueues
\nRouting profiles
\nRouting step expression
\nUser hierarchy groups
\nAt least one filter must be passed from queues, routing profiles, agents, or user hierarchy\n groups.
\nTo filter by phone number, see Create a historical\n metrics report in the Amazon Connect Administrator\n Guide.
\nNote the following limits:
\n\n Filter keys: A maximum of 5 filter keys are supported in\n a single request. Valid filter keys: AGENT
|\n AGENT_HIERARCHY_LEVEL_ONE
| AGENT_HIERARCHY_LEVEL_TWO
|\n AGENT_HIERARCHY_LEVEL_THREE
| AGENT_HIERARCHY_LEVEL_FOUR
|\n AGENT_HIERARCHY_LEVEL_FIVE
| CASE_TEMPLATE_ARN
|\n CASE_STATUS
| CHANNEL
|\n contact/segmentAttributes/connect:Subtype
| FEATURE
|\n FLOW_TYPE
| FLOWS_NEXT_RESOURCE_ID
|\n FLOWS_NEXT_RESOURCE_QUEUE_ID
| FLOWS_OUTCOME_TYPE
|\n FLOWS_RESOURCE_ID
| INITIATION_METHOD
|\n RESOURCE_PUBLISHED_TIMESTAMP
| ROUTING_PROFILE
|\n ROUTING_STEP_EXPRESSION
| QUEUE
| Q_CONNECT_ENABLED
|\n
\n Filter values: A maximum of 100 filter values are\n supported in a single request. VOICE, CHAT, and TASK are valid filterValue
for the\n CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a\n GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total\n of 100 filter values, along with 3 channel filters.
\n contact_lens_conversational_analytics
is a valid filterValue for the\n FEATURE
filter key. It is available only to contacts analyzed by Contact Lens\n conversational analytics.
\n connect:Chat
, connect:SMS
, connect:Telephony
, and\n connect:WebRTC
are valid filterValue
examples (not exhaustive) for\n the contact/segmentAttributes/connect:Subtype filter
key.
\n ROUTING_STEP_EXPRESSION
is a valid filter key with a filter value up to 3000\n length. This filter is case and order sensitive. JSON string fields must be sorted in ascending\n order and JSON array order should be kept as is.
\n Q_CONNECT_ENABLED
. TRUE and FALSE are the only valid filterValues for the\n Q_CONNECT_ENABLED
filter key.
TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow.
\nFALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow
\nThis filter is available only for contact record-driven metrics.
\nThe filters to apply to returned metrics. You can filter on the following resources:
\nAgents
\nCampaigns
\nChannels
\nFeature
\nQueues
\nRouting profiles
\nRouting step expression
\nUser hierarchy groups
\nAt least one filter must be passed from queues, routing profiles, agents, or user hierarchy\n groups.
\nFor metrics for outbound campaigns analytics, you can also use campaigns to satisfy at least\n one filter requirement.
\nTo filter by phone number, see Create a historical\n metrics report in the Amazon Connect Administrator\n Guide.
\nNote the following limits:
\n\n Filter keys: A maximum of 5 filter keys are supported in\n a single request. Valid filter keys: AGENT
|\n AGENT_HIERARCHY_LEVEL_ONE
| AGENT_HIERARCHY_LEVEL_TWO
|\n AGENT_HIERARCHY_LEVEL_THREE
| AGENT_HIERARCHY_LEVEL_FOUR
|\n AGENT_HIERARCHY_LEVEL_FIVE
| ANSWERING_MACHINE_DETECTION_STATUS
|\n CAMPAIGN
| CASE_TEMPLATE_ARN
| CASE_STATUS
|\n CHANNEL
| contact/segmentAttributes/connect:Subtype
|\n DISCONNECT_REASON
| FEATURE
| FLOW_TYPE
|\n FLOWS_NEXT_RESOURCE_ID
| FLOWS_NEXT_RESOURCE_QUEUE_ID
|\n FLOWS_OUTCOME_TYPE
| FLOWS_RESOURCE_ID
|\n INITIATION_METHOD
| RESOURCE_PUBLISHED_TIMESTAMP
|\n ROUTING_PROFILE
| ROUTING_STEP_EXPRESSION
| QUEUE
|\n Q_CONNECT_ENABLED
|
\n Filter values: A maximum of 100 filter values are\n supported in a single request. VOICE, CHAT, and TASK are valid filterValue
for the\n CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a\n GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total\n of 100 filter values, along with 3 channel filters.
\n contact_lens_conversational_analytics
is a valid filterValue for the\n FEATURE
filter key. It is available only to contacts analyzed by Contact Lens\n conversational analytics.
\n connect:Chat
, connect:SMS
, connect:Telephony
, and\n connect:WebRTC
are valid filterValue
examples (not exhaustive) for\n the contact/segmentAttributes/connect:Subtype filter
key.
\n ROUTING_STEP_EXPRESSION
is a valid filter key with a filter value up to 3000\n length. This filter is case and order sensitive. JSON string fields must be sorted in ascending\n order and JSON array order should be kept as is.
\n Q_CONNECT_ENABLED
. TRUE and FALSE are the only valid filterValues for the\n Q_CONNECT_ENABLED
filter key.
TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow.
\nFALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow
\nThis filter is available only for contact record-driven metrics.
\n\n Campaign ARNs are valid filterValues
for the CAMPAIGN
\n filter key.
The grouping applied to the metrics that are returned. For example, when results are grouped\n by queue, the metrics returned are grouped by queue. The values that are returned apply to the\n metrics for each queue. They are not aggregated for all queues.
\nIf no grouping is specified, a summary of all metrics is returned.
\nValid grouping keys: AGENT
| AGENT_HIERARCHY_LEVEL_ONE
|\n AGENT_HIERARCHY_LEVEL_TWO
| AGENT_HIERARCHY_LEVEL_THREE
|\n AGENT_HIERARCHY_LEVEL_FOUR
| AGENT_HIERARCHY_LEVEL_FIVE
|\n CASE_TEMPLATE_ARN
| CASE_STATUS
| CHANNEL
|\n contact/segmentAttributes/connect:Subtype
| FLOWS_RESOURCE_ID
|\n FLOWS_MODULE_RESOURCE_ID
| FLOW_TYPE
| FLOWS_OUTCOME_TYPE
\n | INITIATION_METHOD
| Q_CONNECT_ENABLED
| QUEUE
|\n RESOURCE_PUBLISHED_TIMESTAMP
| ROUTING_PROFILE
|\n ROUTING_STEP_EXPRESSION
\n
The grouping applied to the metrics that are returned. For example, when results are grouped\n by queue, the metrics returned are grouped by queue. The values that are returned apply to the\n metrics for each queue. They are not aggregated for all queues.
\nIf no grouping is specified, a summary of all metrics is returned.
\nValid grouping keys: AGENT
| AGENT_HIERARCHY_LEVEL_ONE
|\n AGENT_HIERARCHY_LEVEL_TWO
| AGENT_HIERARCHY_LEVEL_THREE
|\n AGENT_HIERARCHY_LEVEL_FOUR
| AGENT_HIERARCHY_LEVEL_FIVE
|\n ANSWERING_MACHINE_DETECTION_STATUS
| CAMPAIGN
|\n CASE_TEMPLATE_ARN
| CASE_STATUS
| CHANNEL
|\n contact/segmentAttributes/connect:Subtype
| DISCONNECT_REASON
|\n FLOWS_RESOURCE_ID
| FLOWS_MODULE_RESOURCE_ID
| FLOW_TYPE
\n | FLOWS_OUTCOME_TYPE
| INITIATION_METHOD
|\n Q_CONNECT_ENABLED
| QUEUE
| RESOURCE_PUBLISHED_TIMESTAMP
\n | ROUTING_PROFILE
| ROUTING_STEP_EXPRESSION
\n
The metrics to retrieve. Specify the name, groupings, and filters for each metric. The\n following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator\n Guide.
\nUnit: Percent
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Abandonment rate\n
\nThis metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Adherent time\n
\nUnit: Percent
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent answer rate\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Non-adherent time\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent\n non-response\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nData for this metric is available starting from October 1, 2023 0:00:00 GMT.
\nUI name: Agent non-response without customer abandons\n
\nUnit: Percentage
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Occupancy\n
\nThis metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
\nUnit: Percent
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Adherence\n
\nThis metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Scheduled time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average queue abandon time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Average active time\n
\nUnit: Seconds
\nValid metric filter key: INITIATION_METHOD
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average after contact work time\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid metric filter key: INITIATION_METHOD
. For now, this metric only\n supports the following as INITIATION_METHOD
: INBOUND
|\n OUTBOUND
| CALLBACK
| API
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Average agent API connecting time\n
\nThe Negate
key in Metric Level Filters is not applicable for this\n metric.
Unit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Average agent pause time\n
\nUnit: Count
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Average contacts per case\n
\nUnit: Seconds
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Average case resolution time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average contact duration\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average conversation duration\n
\nUnit: Seconds
\nValid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp
\nUI name: Average flow time\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average agent greeting time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression
\nUI name: Average handle time\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average customer hold time\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average customer hold time all contacts\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average holds\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\n \nUnit: Seconds
\nValid metric filter key: INITIATION_METHOD
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average agent interaction time\n
\nFeature is a valid filter but not a valid grouping.
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average agent interruptions\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average agent interruption time\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average non-talk time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average queue answer time\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect
\nUI name: Average resolution time\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average talk time\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average agent talk time\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average customer talk time\n
\nUnit: Count
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Cases created\n
\nUnit: Count
\nValid metric filter key: INITIATION_METHOD
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts created\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Count
\nValid metric filter key: INITIATION_METHOD
,\n DISCONNECT_REASON
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect
\nUI name: API contacts handled\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Count
\nValid metric filter key: INITIATION_METHOD
\n
Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\n \nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts hold disconnect\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contacts hold agent disconnect\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contacts hold customer disconnect\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contacts put on hold\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contacts transferred out external\n
\nUnit: Percent
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contacts transferred out internal\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts queued\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype
\nUI name: Contacts queued (enqueue timestamp)\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Q in Connect
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
UI name: Contacts removed from queue in X seconds\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
UI name: Contacts resolved in X\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts transferred out\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts transferred out by agent\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts transferred out queue\n
\nUnit: Count
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Current cases\n
\nUnit: Count
\nValid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp
\nUI name: Flows outcome\n
\nUnit: Count
\nValid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows resource ID, Initiation method, Resource published timestamp
\nUI name: Flows started\n
\nUnit: Seconds
\nValid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp
\nUI name: Maximum flow time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Maximum queued time\n
\nUnit: Seconds
\nValid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp
\nUI name: Minimum flow time\n
\nUnit: Percent
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Cases resolved on first contact\n
\nUnit: Percent
\nValid groupings and filters: Queue, RoutingStepExpression
\nUI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.
\nUnit: Percent
\nValid groupings and filters: Queue, RoutingStepExpression
\nUI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.
\nUnit: Percent
\nValid metric filter key: FLOWS_OUTCOME_TYPE
\n
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp
\nUI name: Flows outcome percentage.
\nThe FLOWS_OUTCOME_TYPE
is not a valid grouping.
This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Percentage
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Non-talk\n time percent\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Percentage
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Talk time\n percent\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Percentage
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Agent\n talk time percent\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Percentage
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Customer talk time percent\n
\nUnit: Count
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Cases reopened\n
\nUnit: Count
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Cases resolved\n
\nYou can include up to 20 SERVICE_LEVEL metrics in a request.
\nUnit: Percent
\nValid groupings and filters: Queue, Channel, Routing Profile, Q in Connect
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
UI name: Service level X\n
\nUnit: Count
\nValid groupings and filters: Queue, RoutingStepExpression
\nUI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: After\n contact work time\n
\nUnit: Seconds
\nValid metric filter key: INITIATION_METHOD
. This metric only supports the\n following filter keys as INITIATION_METHOD
: INBOUND
|\n OUTBOUND
| CALLBACK
| API
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent API connecting time\n
\nThe Negate
key in Metric Level Filters is not applicable for this\n metric.
Unit: Count
\nMetric filter:
\nValid values: API
| Incoming
| Outbound
|\n Transfer
| Callback
| Queue_Transfer
|\n Disconnect
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect
\nUI name: Contact abandoned\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
UI name: Contacts abandoned in X seconds\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
UI name: Contacts answered in X seconds\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contact flow time\n
\nUnit: Seconds
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent on contact time\n
\nValid metric filter key: DISCONNECT_REASON
\n
Unit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contact disconnected\n
\nUnit: Seconds
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Error status time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contact handle time\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Customer hold time\n
\nUnit: Seconds
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent idle time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Agent interaction and hold time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent interaction time\n
\nUnit: Seconds
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Non-Productive Time\n
\nUnit: Seconds
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Online time\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect
\nUI name: Callback attempts\n
\nThe metrics to retrieve. Specify the name, groupings, and filters for each metric. The\n following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator\n Guide.
\nUnit: Percent
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Abandonment rate\n
\nThis metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Adherent time\n
\nUnit: Percent
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent answer rate\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Non-adherent time\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent\n non-response\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nData for this metric is available starting from October 1, 2023 0:00:00 GMT.
\nUI name: Agent non-response without customer abandons\n
\nUnit: Percentage
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Occupancy\n
\nThis metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
\nUnit: Percent
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Adherence\n
\nThis metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Scheduled time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average queue abandon time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Average active time\n
\nUnit: Seconds
\nValid metric filter key: INITIATION_METHOD
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average after contact work time\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid metric filter key: INITIATION_METHOD
. For now, this metric only\n supports the following as INITIATION_METHOD
: INBOUND
|\n OUTBOUND
| CALLBACK
| API
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Average agent API connecting time\n
\nThe Negate
key in Metric Level Filters is not applicable for this\n metric.
Unit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Average agent pause time\n
\nUnit: Count
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Average contacts per case\n
\nUnit: Seconds
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Average case resolution time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average contact duration\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average conversation duration\n
\nThis metric is available only for contacts analyzed by outbound campaigns\n analytics.
\nUnit: Count
\nValid groupings and filters: Campaign, Agent, Queue, Routing Profile
\nUI name: Average dials per minute\n
\nUnit: Seconds
\nValid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp
\nUI name: Average flow time\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average agent greeting time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression
\nUI name: Average handle time\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average customer hold time\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average customer hold time all contacts\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average holds\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\n \nUnit: Seconds
\nValid metric filter key: INITIATION_METHOD
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average agent interaction time\n
\nFeature is a valid filter but not a valid grouping.
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average agent interruptions\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average agent interruption time\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average non-talk time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average queue answer time\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect
\nUI name: Average resolution time\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average talk time\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average agent talk time\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Average customer talk time\n
\nThis metric is available only for contacts analyzed by outbound campaigns\n analytics.
\nUnit: Seconds
\nValid groupings and filters: Campaign
\nUI name: Average wait time after customer connection\n
\nThis metric is available only for contacts analyzed by outbound campaigns\n analytics.
\nUnit: Count
\nValid groupings and filters: Campaign, Agent
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter GT
(for\n Greater than).
UI name: Campaign contacts abandoned after X\n
\nThis metric is available only for contacts analyzed by outbound campaigns\n analytics.
\nUnit: Percent
\nValid groupings and filters: Campaign, Agent
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter GT
(for\n Greater than).
UI name: Campaign contacts abandoned after X rate\n
\nUnit: Count
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Cases created\n
\nUnit: Count
\nValid metric filter key: INITIATION_METHOD
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts created\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Count
\nValid metric filter key: INITIATION_METHOD
,\n DISCONNECT_REASON
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect
\nUI name: API contacts handled\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Count
\nValid metric filter key: INITIATION_METHOD
\n
Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\n \nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts hold disconnect\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contacts hold agent disconnect\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contacts hold customer disconnect\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contacts put on hold\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contacts transferred out external\n
\nUnit: Percent
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contacts transferred out internal\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts queued\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype
\nUI name: Contacts queued (enqueue timestamp)\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Q in Connect
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
UI name: Contacts removed from queue in X seconds\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
UI name: Contacts resolved in X\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts transferred out\n
\nFeature is a valid filter but not a valid grouping.
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts transferred out by agent\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contacts transferred out queue\n
\nUnit: Count
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Current cases\n
\nThis metric is available only for contacts analyzed by outbound campaigns\n analytics.
\nUnit: Count
\nValid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS
,\n DISCONNECT_REASON
\n
Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status,\n Disconnect Reason
\nUI name: Delivery attempts\n
\nThis metric is available only for contacts analyzed by outbound campaigns analytics, and\n with the answering machine detection enabled.
\nUnit: Percent
\nValid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS
,\n DISCONNECT_REASON
\n
Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason
\nAnswering Machine Detection Status and Disconnect Reason are valid filters but not valid\n groupings.
\nUI name: Delivery attempt disposition rate\n
\nUnit: Count
\nValid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp
\nUI name: Flows outcome\n
\nUnit: Count
\nValid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows resource ID, Initiation method, Resource published timestamp
\nUI name: Flows started\n
\nThis metric is available only for contacts analyzed by outbound campaigns analytics, and\n with the answering machine detection enabled.
\nUnit: Count
\nValid groupings and filters: Campaign, Agent
\nUI name: Human answered\n
\nUnit: Seconds
\nValid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp
\nUI name: Maximum flow time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Maximum queued time\n
\nUnit: Seconds
\nValid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp
\nUI name: Minimum flow time\n
\nUnit: Percent
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Cases resolved on first contact\n
\nUnit: Percent
\nValid groupings and filters: Queue, RoutingStepExpression
\nUI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.
\nUnit: Percent
\nValid groupings and filters: Queue, RoutingStepExpression
\nUI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.
\nUnit: Percent
\nValid metric filter key: FLOWS_OUTCOME_TYPE
\n
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp
\nUI name: Flows outcome percentage.
\nThe FLOWS_OUTCOME_TYPE
is not a valid grouping.
This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Percentage
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Non-talk\n time percent\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Percentage
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Talk time\n percent\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Percentage
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Agent\n talk time percent\n
\nThis metric is available only for contacts analyzed by Contact Lens conversational\n analytics.
\nUnit: Percentage
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Customer talk time percent\n
\nUnit: Count
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Cases reopened\n
\nUnit: Count
\nRequired filter key: CASE_TEMPLATE_ARN
\nValid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
\nUI name: Cases resolved\n
\nYou can include up to 20 SERVICE_LEVEL metrics in a request.
\nUnit: Percent
\nValid groupings and filters: Queue, Channel, Routing Profile, Q in Connect
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
UI name: Service level X\n
\nUnit: Count
\nValid groupings and filters: Queue, RoutingStepExpression
\nUI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: After\n contact work time\n
\nUnit: Seconds
\nValid metric filter key: INITIATION_METHOD
. This metric only supports the\n following filter keys as INITIATION_METHOD
: INBOUND
|\n OUTBOUND
| CALLBACK
| API
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent API connecting time\n
\nThe Negate
key in Metric Level Filters is not applicable for this\n metric.
Unit: Count
\nMetric filter:
\nValid values: API
| Incoming
| Outbound
|\n Transfer
| Callback
| Queue_Transfer
|\n Disconnect
\n
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect
\nUI name: Contact abandoned\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
UI name: Contacts abandoned in X seconds\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
UI name: Contacts answered in X seconds\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contact flow time\n
\nUnit: Seconds
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent on contact time\n
\nValid metric filter key: DISCONNECT_REASON
\n
Unit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect
\nUI name: Contact disconnected\n
\nUnit: Seconds
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Error status time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Contact handle time\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Customer hold time\n
\nUnit: Seconds
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent idle time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
\nUI name: Agent interaction and hold time\n
\nUnit: Seconds
\nValid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
\nUI name: Agent interaction time\n
\nUnit: Seconds
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Non-Productive Time\n
\nUnit: Seconds
\nValid groupings and filters: Routing Profile, Agent, Agent Hierarchy
\nUI name: Online time\n
\nUnit: Count
\nValid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect
\nUI name: Callback attempts\n
\nLists predefined attributes for the specified Amazon Connect instance.
", + "smithy.api#documentation": "Lists predefined attributes for the specified Amazon Connect instance. Predefined\n attributes are attributes in an Amazon Connect instance that can be used to route\n contacts to an agent or pools of agents within a queue. For more information, see Create\n predefined attributes for routing contacts to agents.
", "smithy.api#http": { "method": "GET", "uri": "/predefined-attributes/{InstanceId}", @@ -22729,7 +22729,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the permissions granted to a security profile.
", + "smithy.api#documentation": "Lists the permissions granted to a security profile.
\nFor information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.
", "smithy.api#http": { "method": "GET", "uri": "/security-profiles-permissions/{InstanceId}/{SecurityProfileId}", @@ -22839,7 +22839,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides summary information about the security profiles for the specified Amazon Connect instance.
\nFor more information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide.
", + "smithy.api#documentation": "Provides summary information about the security profiles for the specified Amazon Connect instance.
\nFor more information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.
", "smithy.api#http": { "method": "GET", "uri": "/security-profiles-summary/{InstanceId}", @@ -23845,12 +23845,12 @@ "AgentsCriteria": { "target": "com.amazonaws.connect#AgentsCriteria", "traits": { - "smithy.api#documentation": "An object to define AgentIds
.
An object to define agentIds.
" } } }, "traits": { - "smithy.api#documentation": "An object to define AgentsCriteria
.
An object to define AgentsCriteria.
" } }, "com.amazonaws.connect#MaxResult10": { @@ -28279,6 +28279,67 @@ } } }, + "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryContent": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1270 + } + } + }, + "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryFailureCode": { + "type": "enum", + "members": { + "QUOTA_EXCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUOTA_EXCEEDED" + } + }, + "INSUFFICIENT_CONVERSATION_CONTENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INSUFFICIENT_CONVERSATION_CONTENT" + } + }, + "FAILED_SAFETY_GUIDELINES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED_SAFETY_GUIDELINES" + } + }, + "INVALID_ANALYSIS_CONFIGURATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_ANALYSIS_CONFIGURATION" + } + }, + "INTERNAL_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNAL_ERROR" + } + } + } + }, + "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryStatus": { + "type": "enum", + "members": { + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + } + } + }, "com.amazonaws.connect#RealTimeContactAnalysisSegmentAttachments": { "type": "structure", "members": { @@ -28405,6 +28466,33 @@ "smithy.api#documentation": "Segment type containing a list of detected issues.
" } }, + "com.amazonaws.connect#RealTimeContactAnalysisSegmentPostContactSummary": { + "type": "structure", + "members": { + "Content": { + "target": "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryContent", + "traits": { + "smithy.api#documentation": "The content of the summary.
" + } + }, + "Status": { + "target": "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryStatus", + "traits": { + "smithy.api#documentation": "Whether the summary was successfully COMPLETED or FAILED to be generated.
", + "smithy.api#required": {} + } + }, + "FailureCode": { + "target": "com.amazonaws.connect#RealTimeContactAnalysisPostContactSummaryFailureCode", + "traits": { + "smithy.api#documentation": "If the summary failed to be generated, one of the following failure codes occurs:
\n\n QUOTA_EXCEEDED
: The number of concurrent analytics jobs reached your service\n quota.
\n INSUFFICIENT_CONVERSATION_CONTENT
: The conversation needs to have at least\n one turn from both the participants in order to generate the summary.
\n FAILED_SAFETY_GUIDELINES
: The generated summary cannot be provided because it\n failed to meet system safety guidelines.
\n INVALID_ANALYSIS_CONFIGURATION
: This code occurs when, for example, you're\n using a language that isn't supported by generative AI-powered post-contact summaries.\n
\n INTERNAL_ERROR
: Internal system error.
Information about the post-contact summary for a real-time contact segment.
" + } + }, "com.amazonaws.connect#RealTimeContactAnalysisSegmentTranscript": { "type": "structure", "members": { @@ -28504,6 +28592,12 @@ "traits": { "smithy.api#enumValue": "Attachments" } + }, + "PostContactSummary": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PostContactSummary" + } } } }, @@ -28515,7 +28609,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 5 + "max": 6 } } }, @@ -28708,6 +28802,12 @@ "traits": { "smithy.api#documentation": "The analyzed attachments.
" } + }, + "PostContactSummary": { + "target": "com.amazonaws.connect#RealTimeContactAnalysisSegmentPostContactSummary", + "traits": { + "smithy.api#documentation": "Information about the post-contact summary.
" + } } }, "traits": { @@ -29437,6 +29537,60 @@ "smithy.api#documentation": "Latest routing criteria on the contact.
" } }, + "com.amazonaws.connect#RoutingCriteriaInput": { + "type": "structure", + "members": { + "Steps": { + "target": "com.amazonaws.connect#RoutingCriteriaInputSteps", + "traits": { + "smithy.api#documentation": "When Amazon Connect does not find an available agent meeting the requirements in a step for\u2028 \n a given step duration, the routing criteria will move on to the next step sequentially until a\u2028 \n join is completed with an agent. When all steps are exhausted, the contact will be offered to any agent \n in the queue.
" + } + } + }, + "traits": { + "smithy.api#documentation": "An object to define the RoutingCriteria.
" + } + }, + "com.amazonaws.connect#RoutingCriteriaInputStep": { + "type": "structure", + "members": { + "Expiry": { + "target": "com.amazonaws.connect#RoutingCriteriaInputStepExpiry", + "traits": { + "smithy.api#documentation": "An object to specify the expiration of a routing step.
" + } + }, + "Expression": { + "target": "com.amazonaws.connect#Expression", + "traits": { + "smithy.api#documentation": "A tagged union to specify expression for a routing step.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Step defines the list of agents to be routed or route based on the agent requirements such as ProficiencyLevel, \n Name, or Value.
" + } + }, + "com.amazonaws.connect#RoutingCriteriaInputStepExpiry": { + "type": "structure", + "members": { + "DurationInSeconds": { + "target": "com.amazonaws.connect#DurationInSeconds", + "traits": { + "smithy.api#documentation": "The number of seconds that the contact will be routed only to agents matching this routing\u2028 step, if expiry \n was configured for this routing step.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Specify whether this routing criteria step should apply for only a limited amount of time,\u2028 or if it should \n never expire.
" + } + }, + "com.amazonaws.connect#RoutingCriteriaInputSteps": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#RoutingCriteriaInputStep" + } + }, "com.amazonaws.connect#RoutingCriteriaStepStatus": { "type": "enum", "members": { @@ -30973,7 +31127,7 @@ } ], "traits": { - "smithy.api#documentation": "Predefined attributes that meet certain criteria.
", + "smithy.api#documentation": "Searches predefined attributes that meet certain criteria. Predefined\n attributes are attributes in an Amazon Connect instance that can be used to route\n contacts to an agent or pools of agents within a queue. For more information, see Create\n predefined attributes for routing contacts to agents.
", "smithy.api#http": { "method": "POST", "uri": "/search-predefined-attributes", @@ -31594,7 +31748,7 @@ } ], "traits": { - "smithy.api#documentation": "Searches security profiles in an Amazon Connect instance, with optional\n filtering.
", + "smithy.api#documentation": "Searches security profiles in an Amazon Connect instance, with optional\n filtering.
\nFor information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.
", "smithy.api#http": { "method": "POST", "uri": "/search-security-profiles", @@ -35162,7 +35316,7 @@ "Comparison": { "target": "com.amazonaws.connect#ResourceArnOrId", "traits": { - "smithy.api#documentation": "The type of comparison. Only \"less than\" (LT) comparisons are supported.
" + "smithy.api#documentation": "The type of comparison. Only \"less than\" (LT) and \"greater than\" (GT) comparisons are\n supported.
" } }, "ThresholdValue": { @@ -36732,6 +36886,12 @@ "traits": { "smithy.api#documentation": "Priority of the contact in the queue. The default priority for new contacts is 5. You can\n raise the priority of a contact compared to other contacts in the queue by assigning them a\n higher priority, such as 1 or 2.
" } + }, + "RoutingCriteria": { + "target": "com.amazonaws.connect#RoutingCriteriaInput", + "traits": { + "smithy.api#documentation": "Updates the routing criteria on the contact. These properties can be used to change how a\u2028\n contact is routed within the queue.
" + } } }, "traits": { @@ -37471,7 +37631,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates a predefined attribute for the specified Amazon Connect instance.
", + "smithy.api#documentation": "Updates a predefined attribute for the specified Amazon Connect instance. Predefined\n attributes are attributes in an Amazon Connect instance that can be used to route\n contacts to an agent or pools of agents within a queue. For more information, see Create\n predefined attributes for routing contacts to agents.
", "smithy.api#http": { "method": "POST", "uri": "/predefined-attributes/{InstanceId}/{Name}", @@ -38530,7 +38690,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates a security profile.
", + "smithy.api#documentation": "Updates a security profile.
\nFor information about security profiles, see Security Profiles in the\n Amazon Connect Administrator Guide. For a mapping of the API name and\n user interface name of the security profile permissions, see List of security profile\n permissions.
", "smithy.api#http": { "method": "POST", "uri": "/security-profiles/{InstanceId}/{SecurityProfileId}", diff --git a/models/controlcatalog.json b/models/controlcatalog.json index 1f9a73a503..32b25e7876 100644 --- a/models/controlcatalog.json +++ b/models/controlcatalog.json @@ -163,6 +163,39 @@ "target": "com.amazonaws.controlcatalog#CommonControlSummary" } }, + "com.amazonaws.controlcatalog#ControlArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 34, + "max": 2048 + }, + "smithy.api#pattern": "^arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_\\-]+$" + } + }, + "com.amazonaws.controlcatalog#ControlBehavior": { + "type": "enum", + "members": { + "PREVENTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PREVENTIVE" + } + }, + "PROACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROACTIVE" + } + }, + "DETECTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DETECTIVE" + } + } + } + }, "com.amazonaws.controlcatalog#ControlCatalog": { "type": "service", "version": "2018-05-10", @@ -170,6 +203,9 @@ { "target": "com.amazonaws.controlcatalog#CommonControlResource" }, + { + "target": "com.amazonaws.controlcatalog#ControlResource" + }, { "target": "com.amazonaws.controlcatalog#DomainResource" }, @@ -881,6 +917,88 @@ } } }, + "com.amazonaws.controlcatalog#ControlResource": { + "type": "resource", + "identifiers": { + "ControlArn": { + "target": "com.amazonaws.controlcatalog#ControlArn" + } + }, + "read": { + "target": "com.amazonaws.controlcatalog#GetControl" + }, + "list": { + "target": "com.amazonaws.controlcatalog#ListControls" + }, + "traits": { + "aws.api#arn": { + "template": "{ControlArn}", + "absolute": true + }, + "aws.iam#disableConditionKeyInference": {}, + "aws.iam#iamResource": { + "name": "control" + } + } + }, + "com.amazonaws.controlcatalog#ControlScope": { + "type": "enum", + "members": { + "GLOBAL": { + 
"target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GLOBAL" + } + }, + "REGIONAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REGIONAL" + } + } + } + }, + "com.amazonaws.controlcatalog#ControlSummary": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.controlcatalog#ControlArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the control.
", + "smithy.api#required": {} + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The display name of the control.
", + "smithy.api#required": {} + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "A description of the control, as it may appear in the console. Describes the functionality of the control.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Overview of information about a control.
" + } + }, + "com.amazonaws.controlcatalog#Controls": { + "type": "list", + "member": { + "target": "com.amazonaws.controlcatalog#ControlSummary" + } + }, + "com.amazonaws.controlcatalog#DeployableRegions": { + "type": "list", + "member": { + "target": "com.amazonaws.controlcatalog#RegionCode" + } + }, "com.amazonaws.controlcatalog#DomainArn": { "type": "string", "traits": { @@ -982,6 +1100,98 @@ "target": "com.amazonaws.controlcatalog#DomainSummary" } }, + "com.amazonaws.controlcatalog#GetControl": { + "type": "operation", + "input": { + "target": "com.amazonaws.controlcatalog#GetControlRequest" + }, + "output": { + "target": "com.amazonaws.controlcatalog#GetControlResponse" + }, + "errors": [ + { + "target": "com.amazonaws.controlcatalog#AccessDeniedException" + }, + { + "target": "com.amazonaws.controlcatalog#InternalServerException" + }, + { + "target": "com.amazonaws.controlcatalog#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.controlcatalog#ThrottlingException" + }, + { + "target": "com.amazonaws.controlcatalog#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Returns details about a specific control, most notably a list of Amazon Web Services Regions where this control is supported. Input a value for the ControlArn parameter, in ARN form. GetControl
accepts controltower or controlcatalog control ARNs as input. Returns a controlcatalog ARN format.
In the API response, controls that have the value GLOBAL
in the Scope
field do not show the DeployableRegions
field, because it does not apply. Controls that have the value REGIONAL
in the Scope
field return a value for the DeployableRegions
field, as shown in the example.
The Amazon Resource Name (ARN) of the control. It has one of the following formats:
\n\n Global format\n
\n\n arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID}
\n
\n Or Regional format\n
\n\n arn:{PARTITION}:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID}
\n
Here is a more general pattern that covers Amazon Web Services Control Tower and Control Catalog ARNs:
\n\n ^arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_\\\\-]+$
\n
The Amazon Resource Name (ARN) of the control.
", + "smithy.api#required": {} + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The display name of the control.
", + "smithy.api#required": {} + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "A description of what the control does.
", + "smithy.api#required": {} + } + }, + "Behavior": { + "target": "com.amazonaws.controlcatalog#ControlBehavior", + "traits": { + "smithy.api#documentation": "A term that identifies the control's functional behavior. One of Preventive
, Deteictive
, Proactive
\n
Returns a paginated list of all available controls in the Amazon Web Services Control Catalog library. Allows you to discover available controls. The list of controls is given as structures of type controlSummary. The ARN is returned in the global controlcatalog format, as shown in the examples.
", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/list-controls" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Controls" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.controlcatalog#ListControlsRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.controlcatalog#PaginationToken", + "traits": { + "smithy.api#documentation": "The pagination token that's used to fetch the next set of results.
", + "smithy.api#httpQuery": "nextToken" + } + }, + "MaxResults": { + "target": "com.amazonaws.controlcatalog#MaxListControlsResults", + "traits": { + "smithy.api#documentation": "The maximum number of results on a page or for an API request call.
", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.controlcatalog#ListControlsResponse": { + "type": "structure", + "members": { + "Controls": { + "target": "com.amazonaws.controlcatalog#Controls", + "traits": { + "smithy.api#documentation": "Returns a list of controls, given as structures of type controlSummary.
", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.controlcatalog#PaginationToken", + "traits": { + "smithy.api#documentation": "The pagination token that's used to fetch the next set of results.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.controlcatalog#ListDomains": { "type": "operation", "input": { @@ -1260,6 +1551,15 @@ } } }, + "com.amazonaws.controlcatalog#MaxListControlsResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, "com.amazonaws.controlcatalog#MaxListDomainsResults": { "type": "integer", "traits": { @@ -1408,6 +1708,46 @@ } } }, + "com.amazonaws.controlcatalog#RegionCode": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9-]{1,128}$" + } + }, + "com.amazonaws.controlcatalog#RegionConfiguration": { + "type": "structure", + "members": { + "Scope": { + "target": "com.amazonaws.controlcatalog#ControlScope", + "traits": { + "smithy.api#documentation": "The coverage of the control, if deployed. Scope is an enumerated type, with value Regional
, or Global
. A control with Global scope is effective in all Amazon Web Services Regions, regardless of the Region from which it is enabled, or to which it is deployed. A control implemented by an SCP is usually Global in scope. A control with Regional scope has operations that are restricted specifically to the Region from which it is enabled and to which it is deployed. Controls implemented by Config rules and CloudFormation hooks usually are Regional in scope. Security Hub controls usually are Regional in scope.
Regions in which the control is available to be deployed.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Returns information about the control, including the scope of the control, if enabled, and the Regions in which the control currently is available for deployment.
\nIf you are applying controls through an Amazon Web Services Control Tower landing zone environment, remember that the values returned in the RegionConfiguration
API operation are not related to the governed Regions in your landing zone. For example, if you are governing Regions A
, B
, and C
while the control is available in Regions A
, B
, C,
and D
, you'd see a response with DeployableRegions
of A
, B
, C
, and D
for a control with REGIONAL
scope, even though you may not intend to deploy the control in Region D
, because you do not govern it through your landing zone.
The requested resource does not exist.
", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, "com.amazonaws.controlcatalog#ThrottlingException": { "type": "structure", "members": { diff --git a/models/controltower.json b/models/controltower.json index 996779814b..b70920fe1f 100644 --- a/models/controltower.json +++ b/models/controltower.json @@ -67,7 +67,7 @@ "x-amzn-trace-id" ] }, - "smithy.api#documentation": "Amazon Web Services Control Tower offers application programming interface (API) operations that support programmatic interaction with these types of resources:
\n\n \n Controls\n \n
\n\n DisableControl\n
\n\n EnableControl\n
\n\n GetEnabledControl\n
\n\n ListEnabledControls\n
\n\n UpdateEnabledControl\n
\n\n \n Landing zones\n \n
\n\n CreateLandingZone\n
\n\n DeleteLandingZone\n
\n\n GetLandingZone\n
\n\n ListLandingZones\n
\n\n ResetLandingZone\n
\n\n UpdateLandingZone\n
\n\n \n Baselines\n \n
\n\n DisableBaseline\n
\n\n EnableBaseline\n
\n\n GetBaseline\n
\n\n GetBaselineOperation\n
\n\n GetEnabledBaseline\n
\n\n ListBaselines\n
\n\n ListEnabledBaselines\n
\n\n ResetEnabledBaseline\n
\n\n Tagging\n
\n\n ListTagsForResource\n
\n\n TagResource\n
\n\n UntagResource\n
\nFor more information about these types of resources, see the \n Amazon Web Services Control Tower User Guide\n .
\n\n About control APIs\n
\nThese interfaces allow you to apply the Amazon Web Services library of pre-defined\n controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms \"control\" and \"guardrail\" are synonyms.
\nTo call these APIs, you'll need to know:
\nthe controlIdentifier
for the control--or guardrail--you are targeting.
the ARN associated with the target organizational unit (OU), which we call the targetIdentifier
.
the ARN associated with a resource that you wish to tag or untag.
\n\n To get the controlIdentifier
for your Amazon Web Services Control Tower\n control:\n
The controlIdentifier
is an ARN that is specified for each\n control. You can view the controlIdentifier
in the console on the Control details page, as well as in the documentation.
The controlIdentifier
is unique in each Amazon Web Services Region for each control. You can\n find the controlIdentifier
for each Region and control in the Tables of control metadata or the Control availability by Region tables in the Amazon Web Services Control Tower Controls Reference Guide.
A quick-reference list of control identifers for the Amazon Web Services Control Tower legacy Strongly recommended and\n Elective controls is given in Resource identifiers for\n APIs and controls in the \n Amazon Web Services Control Tower Controls Reference Guide\n . Remember that Mandatory controls cannot be added or removed.
\n\n ARN format:\n arn:aws:controltower:{REGION}::control/{CONTROL_NAME}
\n
\n Example:\n
\n\n arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED
\n
\n To get the targetIdentifier
:\n
The targetIdentifier
is the ARN for an OU.
In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU.
\n\n OU ARN format:\n
\n\n arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}
\n
\n About landing zone APIs\n
\nYou can configure and launch an Amazon Web Services Control Tower landing zone with APIs. For an introduction and steps, see Getting started with Amazon Web Services Control Tower using APIs.
\nFor an overview of landing zone API operations, see Amazon Web Services Control Tower supports landing zone APIs. The individual API operations for landing zones are detailed in this document, the API reference manual, in the \"Actions\" section.
\n\n About baseline APIs\n
\nYou can apply the AWSControlTowerBaseline
baseline to an organizational unit (OU) as a way to register the OU with Amazon Web Services Control Tower, programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration with baselines.
You can call the baseline API operations to view the baselines that Amazon Web Services Control Tower enables for your landing zone, on your behalf, when setting up the landing zone. These baselines are read-only baselines.
\nThe individual API operations for baselines are detailed in this document, the API reference manual, in the \"Actions\" section. For usage examples, see Baseline API input and output examples with CLI.
\n\n Details and examples\n
\n\n Creating Amazon Web Services Control Tower resources with Amazon Web Services CloudFormation\n
\nTo view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower\n
\n\n Recording API Requests\n
\nAmazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your\n Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by\n CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made\n the request and when, and so on. For more about Amazon Web Services Control Tower and its support for\n CloudTrail, see Logging Amazon Web Services Control Tower\n Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about\n CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User\n Guide.
", + "smithy.api#documentation": "Amazon Web Services Control Tower offers application programming interface (API)\n operations that support programmatic interaction with these types of resources:
\n\n \n Controls\n \n
\n\n DisableControl\n
\n\n EnableControl\n
\n\n GetEnabledControl\n
\n\n ListEnabledControls\n
\n\n UpdateEnabledControl\n
\n\n \n Landing\n zones\n \n
\n\n CreateLandingZone\n
\n\n DeleteLandingZone\n
\n\n GetLandingZone\n
\n\n ListLandingZones\n
\n\n ResetLandingZone\n
\n\n UpdateLandingZone\n
\n\n \n Baselines\n \n
\n\n DisableBaseline\n
\n\n EnableBaseline\n
\n\n GetBaseline\n
\n\n GetBaselineOperation\n
\n\n GetEnabledBaseline\n
\n\n ListBaselines\n
\n\n ListEnabledBaselines\n
\n\n ResetEnabledBaseline\n
\n\n \n Tagging\n \n
\n\n ListTagsForResource\n
\n\n TagResource\n
\n\n UntagResource\n
\nFor more information about these types of resources, see the \n Amazon Web Services Control Tower User Guide\n .
\n\n About control APIs\n
\nThese interfaces allow you to apply the Amazon Web Services library of pre-defined\n controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms \"control\" and \"guardrail\" are synonyms.
\nTo call these APIs, you'll need to know:
\nthe controlIdentifier
for the control--or guardrail--you are\n targeting.
the ARN associated with the target organizational unit (OU), which we call the\n targetIdentifier
.
the ARN associated with a resource that you wish to tag or untag.
\n\n To get the controlIdentifier
for your Amazon Web Services Control Tower control:\n
The controlIdentifier
is an ARN that is specified for each control. You can\n view the controlIdentifier
in the console on the Control\n details page, as well as in the documentation.
\n About identifiers for Amazon Web Services Control Tower\n
\nThe Amazon Web Services Control Tower controlIdentifier
is unique in each Amazon Web Services Region for each\n control. You can find the controlIdentifier
for each Region and control in the\n Tables of control metadata or the Control\n availability by Region tables in the Amazon Web Services Control Tower\n Controls Reference Guide.
A quick-reference list of control identifiers for the Amazon Web Services Control Tower\n legacy Strongly recommended and Elective controls\n is given in Resource\n identifiers for APIs and controls in the \n Amazon Web Services Control Tower Controls Reference Guide\n . Remember\n that Mandatory controls cannot be added or removed.
\n\n Some controls have two identifiers\n
\n\n ARN format for Amazon Web Services Control\n Tower:\n arn:aws:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID}
\n
\n Example:\n
\n\n arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED
\n
\n ARN format for Amazon Web Services Control\n Catalog:\n arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID}
\n
You can find the {CONTROL_CATALOG_OPAQUE_ID}
in the \n Amazon Web Services Control Tower Controls Reference\n Guide\n , or in the Amazon Web Services Control Tower console, on the\n Control details page.
The Amazon Web Services Control Tower APIs for enabled controls, such as\n GetEnabledControl
and ListEnabledControls
always return an\n ARN of the same type given when the control was enabled.
\n To get the targetIdentifier
:\n
The targetIdentifier
is the ARN for an OU.
In the Amazon Web Services Organizations console, you can find the ARN for the OU on the\n Organizational unit details page associated with that\n OU.
\n\n OU ARN format:\n
\n\n arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}
\n
\n About landing zone APIs\n
\nYou can configure and launch an Amazon Web Services Control Tower landing zone with APIs.\n For an introduction and steps, see Getting started with\n Amazon Web Services Control Tower using APIs.
\nFor an overview of landing zone API operations, see \n Amazon Web Services Control Tower supports landing zone APIs. The individual API\n operations for landing zones are detailed in this document, the API reference\n manual, in the \"Actions\" section.
\n\n About baseline APIs\n
\nYou can apply the AWSControlTowerBaseline
baseline to an organizational\n unit (OU) as a way to register the OU with Amazon Web Services Control Tower,\n programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration\n with baselines.
You can call the baseline API operations to view the baselines that Amazon Web Services\n Control Tower enables for your landing zone, on your behalf, when setting up the landing\n zone. These baselines are read-only baselines.
\nThe individual API operations for baselines are detailed in this document, the API\n reference manual, in the \"Actions\" section. For usage examples, see Baseline API input and output examples with CLI.
\n\n About Amazon Web Services Control Catalog identifiers\n
\nThe EnableControl
and DisableControl
API operations can\n be called by specifying either the Amazon Web Services Control Tower identifier or the\n Amazon Web Services Control Catalog identifier. The API response returns the same\n type of identifier that you specified when calling the API.
If you use an Amazon Web Services Control Tower identifier to call the\n EnableControl
API, and then call EnableControl
again\n with an Amazon Web Services Control Catalog identifier, Amazon Web Services Control\n Tower returns an error message stating that the control is already enabled. Similar\n behavior applies to the DisableControl
API operation.
Mandatory controls and the landing-zone-level Region deny control have Amazon Web Services Control Tower identifiers only.
\n\n Details and examples\n
\n\n Creating Amazon Web Services Control Tower resources with Amazon Web Services\n CloudFormation\n
\nTo view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower\n
\n\n Recording API Requests\n
\nAmazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that\n records Amazon Web Services API calls for your Amazon Web Services account and delivers log\n files to an Amazon S3 bucket. By using information collected by CloudTrail, you can\n determine which requests the Amazon Web Services Control Tower service received, who made\n the request and when, and so on. For more about Amazon Web Services Control Tower and its\n support for CloudTrail, see Logging Amazon Web Services Control Tower Actions with Amazon Web Services CloudTrail in the\n Amazon Web Services Control Tower User Guide. To learn more about CloudTrail, including\n how to turn it on and find your log files, see the Amazon Web Services CloudTrail User\n Guide.
", "smithy.api#title": "AWS Control Tower", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/cost-optimization-hub.json b/models/cost-optimization-hub.json index 08cdf093d0..52e9b667e5 100644 --- a/models/cost-optimization-hub.json +++ b/models/cost-optimization-hub.json @@ -1943,7 +1943,7 @@ "includeMemberAccounts": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "The enrollment status of all member accounts in the organization if the account is the\n management account.
" + "smithy.api#documentation": "The enrollment status of all member accounts in the organization if the account is the\n management account or delegated administrator.
" } }, "nextToken": { @@ -2006,13 +2006,22 @@ "maxResults": { "target": "com.amazonaws.costoptimizationhub#MaxResults", "traits": { - "smithy.api#documentation": "The maximum number of recommendations that are returned for the request.
", + "smithy.api#documentation": "The maximum number of recommendations to be returned for the request.
", "smithy.api#range": { "min": 0, "max": 1000 } } }, + "metrics": { + "target": "com.amazonaws.costoptimizationhub#SummaryMetricsList", + "traits": { + "smithy.api#documentation": "Additional metrics to be returned for the request. The only valid value is\n savingsPercentage
.
List of all savings recommendations.
" + "smithy.api#documentation": "A list of all savings recommendations.
" } }, "groupBy": { @@ -2051,6 +2060,15 @@ "smithy.api#documentation": "The currency code used for the recommendation.
" } }, + "metrics": { + "target": "com.amazonaws.costoptimizationhub#SummaryMetricsResult", + "traits": { + "smithy.api#documentation": "The results or descriptions for the additional metrics, based on whether the metrics were\n or were not requested.
", + "smithy.api#tags": [ + "delegatedAdmin" + ] + } + }, "nextToken": { "target": "smithy.api#String", "traits": { @@ -3353,6 +3371,43 @@ "smithy.api#documentation": "The storage configuration used for recommendations.
" } }, + "com.amazonaws.costoptimizationhub#SummaryMetrics": { + "type": "enum", + "members": { + "SAVINGS_PERCENTAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SavingsPercentage" + } + } + } + }, + "com.amazonaws.costoptimizationhub#SummaryMetricsList": { + "type": "list", + "member": { + "target": "com.amazonaws.costoptimizationhub#SummaryMetrics" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.costoptimizationhub#SummaryMetricsResult": { + "type": "structure", + "members": { + "savingsPercentage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The savings percentage based on your Amazon Web Services spend over the past 30\n days.
\nSavings percentage is only supported when filtering by Region, account ID, or\n tags.
\nThe results or descriptions for the additional metrics, based on whether the metrics were\n or were not requested.
" + } + }, "com.amazonaws.costoptimizationhub#Tag": { "type": "structure", "members": { @@ -3421,7 +3476,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the enrollment (opt in and opt out) status of an account to the Cost Optimization\n Hub service.
\nIf the account is a management account of an organization, this action can also be used to\n enroll member accounts of the organization.
\nYou must have the appropriate permissions to opt in to Cost Optimization Hub and to view\n its recommendations. When you opt in, Cost Optimization Hub automatically creates a\n service-linked role in your account to access its data.
" + "smithy.api#documentation": "Updates the enrollment (opt in and opt out) status of an account to the Cost Optimization\n Hub service.
\nIf the account is a management account or delegated administrator of an organization, this\n action can also be used to enroll member accounts of the organization.
\nYou must have the appropriate permissions to opt in to Cost Optimization Hub and to view\n its recommendations. When you opt in, Cost Optimization Hub automatically creates a\n service-linked role in your account to access its data.
" } }, "com.amazonaws.costoptimizationhub#UpdateEnrollmentStatusRequest": { @@ -3437,7 +3492,7 @@ "includeMemberAccounts": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "Indicates whether to enroll member accounts of the organization if the account is the\n management account.
" + "smithy.api#documentation": "Indicates whether to enroll member accounts of the organization if the account is the\n management account or delegated administrator.
" } } }, diff --git a/models/datazone.json b/models/datazone.json index 42d9639333..45659cbcf9 100644 --- a/models/datazone.json +++ b/models/datazone.json @@ -483,6 +483,104 @@ } ] }, + "com.amazonaws.datazone#AssetFilterConfiguration": { + "type": "union", + "members": { + "columnConfiguration": { + "target": "com.amazonaws.datazone#ColumnFilterConfiguration", + "traits": { + "smithy.api#documentation": "The column configuration of the asset filter.
" + } + }, + "rowConfiguration": { + "target": "com.amazonaws.datazone#RowFilterConfiguration", + "traits": { + "smithy.api#documentation": "The row configuration of the asset filter.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration details of the asset filter.
" + } + }, + "com.amazonaws.datazone#AssetFilterSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "The ID of the asset filter.
", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the domain where the asset filter lives.
", + "smithy.api#required": {} + } + }, + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the data asset.
", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FilterName", + "traits": { + "smithy.api#documentation": "The name of the asset filter.
", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "The description of the asset filter.
" + } + }, + "status": { + "target": "com.amazonaws.datazone#FilterStatus", + "traits": { + "smithy.api#documentation": "The status of the asset filter.
" + } + }, + "effectiveColumnNames": { + "target": "com.amazonaws.datazone#ColumnNameList", + "traits": { + "smithy.api#documentation": "The effective column names of the asset filter.
" + } + }, + "effectiveRowFilter": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The effective row filter of the asset filter.
" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp at which the asset filter was created.
" + } + }, + "errorMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The error message that is displayed if the action does not succeed.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The summary of the asset filter.
" + } + }, + "com.amazonaws.datazone#AssetFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#AssetFilterSummary" + } + }, "com.amazonaws.datazone#AssetId": { "type": "string", "traits": { @@ -495,6 +593,38 @@ "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" } }, + "com.amazonaws.datazone#AssetInDataProductListingItem": { + "type": "structure", + "members": { + "entityId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The entity ID of the listing of the asset in a data product.
" + } + }, + "entityRevision": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The entity revision of the listing of the asset in a data product.
" + } + }, + "entityType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The entity type of the listing of the asset in a data product.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The listing of the asset in a data product.
" + } + }, + "com.amazonaws.datazone#AssetInDataProductListingItems": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#AssetInDataProductListingItem" + } + }, "com.amazonaws.datazone#AssetItem": { "type": "structure", "members": { @@ -1462,6 +1592,26 @@ "smithy.api#documentation": "Part of the provisioning properties of the environment blueprint.
" } }, + "com.amazonaws.datazone#ColumnFilterConfiguration": { + "type": "structure", + "members": { + "includedColumnNames": { + "target": "com.amazonaws.datazone#ColumnNameList", + "traits": { + "smithy.api#documentation": "Specifies whether to include column names.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The column configuration of the asset filter.
" + } + }, + "com.amazonaws.datazone#ColumnNameList": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, "com.amazonaws.datazone#ConfigurableActionParameter": { "type": "structure", "members": { @@ -1590,6 +1740,177 @@ "smithy.api#idempotent": {} } }, + "com.amazonaws.datazone#CreateAssetFilter": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateAssetFilterInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateAssetFilterOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Creates a data asset filter.
", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#CreateAssetFilterInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the domain in which you want to create an asset filter.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "assetIdentifier": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the data asset.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FilterName", + "traits": { + "smithy.api#documentation": "The name of the asset filter.
", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "The description of the asset filter.
" + } + }, + "configuration": { + "target": "com.amazonaws.datazone#AssetFilterConfiguration", + "traits": { + "smithy.api#documentation": "The configuration of the asset filter.
", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateAssetFilterOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "The ID of the asset filter.
", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the domain where the asset filter is created.
", + "smithy.api#required": {} + } + }, + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the asset.
", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FilterName", + "traits": { + "smithy.api#documentation": "The name of the asset filter.
", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "The description of the asset filter.
" + } + }, + "status": { + "target": "com.amazonaws.datazone#FilterStatus", + "traits": { + "smithy.api#documentation": "The status of the asset filter.
" + } + }, + "configuration": { + "target": "com.amazonaws.datazone#AssetFilterConfiguration", + "traits": { + "smithy.api#documentation": "The configuration of the asset filter.
", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp at which the asset filter was created.
" + } + }, + "errorMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The error message that is displayed if the asset filter is not created\n successfully.
" + } + }, + "effectiveColumnNames": { + "target": "com.amazonaws.datazone#ColumnNameList", + "traits": { + "smithy.api#documentation": "The column names in the asset filter.
" + } + }, + "effectiveRowFilter": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The row filter in the asset filter.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#CreateAssetInput": { "type": "structure", "members": { @@ -2207,13 +2528,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateDataSource": { + "com.amazonaws.datazone#CreateDataProduct": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateDataSourceInput" + "target": "com.amazonaws.datazone#CreateDataProductInput" }, "output": { - "target": "com.amazonaws.datazone#CreateDataSourceOutput" + "target": "com.amazonaws.datazone#CreateDataProductOutput" }, "errors": [ { @@ -2239,99 +2560,66 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Amazon DataZone data source.
", + "smithy.api#documentation": "Creates a data product.
", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/data-sources" + "uri": "/v2/domains/{domainIdentifier}/data-products" }, "smithy.api#idempotent": {} } }, - "com.amazonaws.datazone#CreateDataSourceInput": { + "com.amazonaws.datazone#CreateDataProductInput": { "type": "structure", "members": { - "name": { - "target": "com.amazonaws.datazone#Name", - "traits": { - "smithy.api#documentation": "The name of the data source.
", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "The description of the data source.
" - } - }, "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain where the data source is created.
", + "smithy.api#documentation": "The ID of the domain where the data product is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "projectIdentifier": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The identifier of the Amazon DataZone project in which you want to add this data\n source.
", - "smithy.api#required": {} - } - }, - "environmentIdentifier": { - "target": "smithy.api#String", + "name": { + "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "The unique identifier of the Amazon DataZone environment to which the data source publishes\n assets.
", + "smithy.api#documentation": "The name of the data product.
", "smithy.api#required": {} } }, - "type": { - "target": "com.amazonaws.datazone#DataSourceType", + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The type of the data source.
", + "smithy.api#documentation": "The ID of the owning project of the data product.
", "smithy.api#required": {} } }, - "configuration": { - "target": "com.amazonaws.datazone#DataSourceConfigurationInput", - "traits": { - "smithy.api#documentation": "Specifies the configuration of the data source. It can be set to either\n glueRunConfiguration
or redshiftRunConfiguration
.
Specifies whether the business name generation is to be enabled for this data\n source.
" - } - }, - "enableSetting": { - "target": "com.amazonaws.datazone#EnableSetting", + "description": { + "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "Specifies whether the data source is enabled.
" + "smithy.api#documentation": "The description of the data product.
" } }, - "schedule": { - "target": "com.amazonaws.datazone#ScheduleConfiguration", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "The schedule of the data source runs.
" + "smithy.api#documentation": "The glossary terms of the data product.
" } }, - "publishOnImport": { - "target": "smithy.api#Boolean", + "formsInput": { + "target": "com.amazonaws.datazone#FormInputList", "traits": { - "smithy.api#documentation": "Specifies whether the assets that this data source creates in the inventory are to be\n also automatically published to the catalog.
" + "smithy.api#documentation": "The metadata forms of the data product.
" } }, - "assetFormsInput": { - "target": "com.amazonaws.datazone#FormInputList", + "items": { + "target": "com.amazonaws.datazone#DataProductItems", "traits": { - "smithy.api#documentation": "The metadata forms that are to be attached to the assets that this data source works\n with.
" + "smithy.api#documentation": "The data assets of the data product.
" } }, "clientToken": { - "target": "smithy.api#String", + "target": "com.amazonaws.datazone#ClientToken", "traits": { "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", "smithy.api#idempotencyToken": {} @@ -2342,133 +2630,98 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateDataSourceOutput": { + "com.amazonaws.datazone#CreateDataProductOutput": { "type": "structure", "members": { - "id": { - "target": "com.amazonaws.datazone#DataSourceId", + "domainId": { + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The unique identifier of the data source.
", + "smithy.api#documentation": "The ID of the domain where the data product lives.
", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#DataSourceStatus", - "traits": { - "smithy.api#documentation": "The status of the data source.
" - } - }, - "type": { - "target": "com.amazonaws.datazone#DataSourceType", - "traits": { - "smithy.api#documentation": "The type of the data source.
" - } - }, - "name": { - "target": "com.amazonaws.datazone#Name", + "id": { + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "The name of the data source.
", + "smithy.api#documentation": "The ID of the data product.
", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "The description of the data source.
" - } - }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the data source is created.
", + "smithy.api#documentation": "The revision of the data product.
", "smithy.api#required": {} } }, - "projectId": { + "owningProjectId": { "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone project to which the data source is added.
", + "smithy.api#documentation": "The ID of the owning project of the data product.
", "smithy.api#required": {} } }, - "environmentId": { - "target": "com.amazonaws.datazone#EnvironmentId", + "name": { + "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "The unique identifier of the Amazon DataZone environment to which the data source publishes\n assets.
", + "smithy.api#documentation": "The name of the data product.
", "smithy.api#required": {} } }, - "configuration": { - "target": "com.amazonaws.datazone#DataSourceConfigurationOutput", + "status": { + "target": "com.amazonaws.datazone#DataProductStatus", "traits": { - "smithy.api#documentation": "Specifies the configuration of the data source. It can be set to either\n glueRunConfiguration
or redshiftRunConfiguration
.
The status of the data product.
", + "smithy.api#required": {} } }, - "recommendation": { - "target": "com.amazonaws.datazone#RecommendationConfiguration", + "description": { + "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "Specifies whether the business name generation is to be enabled for this data\n source.
" + "smithy.api#documentation": "The description of the data product.
" } }, - "enableSetting": { - "target": "com.amazonaws.datazone#EnableSetting", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "Specifies whether the data source is enabled.
" + "smithy.api#documentation": "The glossary terms of the data product.
" } }, - "publishOnImport": { - "target": "smithy.api#Boolean", + "items": { + "target": "com.amazonaws.datazone#DataProductItems", "traits": { - "smithy.api#documentation": "Specifies whether the assets that this data source creates in the inventory are to be\n also automatically published to the catalog.
" + "smithy.api#documentation": "The data assets of the data product.
" } }, - "assetFormsOutput": { + "formsOutput": { "target": "com.amazonaws.datazone#FormOutputList", "traits": { - "smithy.api#documentation": "The metadata forms attached to the assets that this data source creates.
" - } - }, - "schedule": { - "target": "com.amazonaws.datazone#ScheduleConfiguration", - "traits": { - "smithy.api#documentation": "The schedule of the data source runs.
" - } - }, - "lastRunStatus": { - "target": "com.amazonaws.datazone#DataSourceRunStatus", - "traits": { - "smithy.api#documentation": "The status of the last run of this data source.
" - } - }, - "lastRunAt": { - "target": "com.amazonaws.datazone#DateTime", - "traits": { - "smithy.api#documentation": "The timestamp that specifies when the data source was last run.
" + "smithy.api#documentation": "The metadata forms of the data product.
" } }, - "lastRunErrorMessage": { - "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "Specifies the error message that is returned if the operation cannot be successfully\n completed.
" + "smithy.api#documentation": "The timestamp at which the data product was created.
" } }, - "errorMessage": { - "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "Specifies the error message that is returned if the operation cannot be successfully\n completed.
" + "smithy.api#documentation": "The user who created the data product.
" } }, - "createdAt": { - "target": "com.amazonaws.datazone#DateTime", + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "The timestamp of when the data source was created.
" + "smithy.api#documentation": "The timestamp at which the first revision of the data product was created.
" } }, - "updatedAt": { - "target": "com.amazonaws.datazone#DateTime", + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "The timestamp of when the data source was updated.
" + "smithy.api#documentation": "The user who created the first revision of the data product.
" } } }, @@ -2476,13 +2729,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateDomain": { + "com.amazonaws.datazone#CreateDataProductRevision": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateDomainInput" + "target": "com.amazonaws.datazone#CreateDataProductRevisionInput" }, "output": { - "target": "com.amazonaws.datazone#CreateDomainOutput" + "target": "com.amazonaws.datazone#CreateDataProductRevisionOutput" }, "errors": [ { @@ -2497,9 +2750,6 @@ { "target": "com.amazonaws.datazone#ResourceNotFoundException" }, - { - "target": "com.amazonaws.datazone#ServiceQuotaExceededException" - }, { "target": "com.amazonaws.datazone#ThrottlingException" }, @@ -2508,61 +2758,67 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Amazon DataZone domain.
", + "smithy.api#documentation": "Creates a data product revision.
", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains" + "uri": "/v2/domains/{domainIdentifier}/data-products/{identifier}/revisions" }, - "smithy.api#idempotent": {}, - "smithy.api#tags": [ - "Administration" - ] + "smithy.api#idempotent": {} } }, - "com.amazonaws.datazone#CreateDomainInput": { + "com.amazonaws.datazone#CreateDataProductRevisionInput": { "type": "structure", "members": { - "name": { - "target": "smithy.api#String", + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The name of the Amazon DataZone domain.
", + "smithy.api#documentation": "The ID of the domain where the data product revision is created.
", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "description": { - "target": "smithy.api#String", + "identifier": { + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "The description of the Amazon DataZone domain.
" + "smithy.api#documentation": "The ID of the data product revision.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "singleSignOn": { - "target": "com.amazonaws.datazone#SingleSignOn", + "name": { + "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "The single-sign on configuration of the Amazon DataZone domain.
" + "smithy.api#documentation": "The name of the data product revision.
", + "smithy.api#required": {} } }, - "domainExecutionRole": { - "target": "com.amazonaws.datazone#RoleArn", + "description": { + "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "The domain execution role that is created when an Amazon DataZone domain is created. The\n domain execution role is created in the Amazon Web Services account that houses the\n Amazon DataZone domain.
", - "smithy.api#required": {} + "smithy.api#documentation": "The description of the data product revision.
" } }, - "kmsKeyIdentifier": { - "target": "com.amazonaws.datazone#KmsKeyArn", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "The identifier of the Amazon Web Services Key Management Service (KMS) key that is used\n to encrypt the Amazon DataZone domain, metadata, and reporting data.
" + "smithy.api#documentation": "The glossary terms of the data product revision.
" } }, - "tags": { - "target": "com.amazonaws.datazone#Tags", + "items": { + "target": "com.amazonaws.datazone#DataProductItems", "traits": { - "smithy.api#documentation": "The tags specified for the Amazon DataZone domain.
" + "smithy.api#documentation": "The data assets of the data product revision.
" + } + }, + "formsInput": { + "target": "com.amazonaws.datazone#FormInputList", + "traits": { + "smithy.api#documentation": "The metadata forms of the data product revision.
" } }, "clientToken": { - "target": "smithy.api#String", + "target": "com.amazonaws.datazone#ClientToken", "traits": { "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", "smithy.api#idempotencyToken": {} @@ -2573,119 +2829,112 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateDomainOutput": { + "com.amazonaws.datazone#CreateDataProductRevisionOutput": { "type": "structure", "members": { - "id": { + "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The identifier of the Amazon DataZone domain.
", + "smithy.api#documentation": "The ID of the domain where data product revision is created.
", "smithy.api#required": {} } }, - "name": { - "target": "smithy.api#String", + "id": { + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "The name of the Amazon DataZone domain.
" + "smithy.api#documentation": "The ID of the data product revision.
", + "smithy.api#required": {} } }, - "description": { - "target": "smithy.api#String", + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "The description of the Amazon DataZone domain.
" + "smithy.api#documentation": "The revision of the data product revision.
", + "smithy.api#required": {} } }, - "singleSignOn": { - "target": "com.amazonaws.datazone#SingleSignOn", + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The single-sign on configuration of the Amazon DataZone domain.
" + "smithy.api#documentation": "The ID of the owning project of the data product revision.
", + "smithy.api#required": {} } }, - "domainExecutionRole": { - "target": "com.amazonaws.datazone#RoleArn", + "name": { + "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "The domain execution role that is created when an Amazon DataZone domain is created. The\n domain execution role is created in the Amazon Web Services account that houses the\n Amazon DataZone domain.
" + "smithy.api#documentation": "The name of the data product revision.
", + "smithy.api#required": {} } }, - "arn": { - "target": "smithy.api#String", + "status": { + "target": "com.amazonaws.datazone#DataProductStatus", "traits": { - "smithy.api#documentation": "The ARN of the Amazon DataZone domain.
" + "smithy.api#default": "CREATED", + "smithy.api#documentation": "The status of the data product revision.
", + "smithy.api#required": {} } }, - "kmsKeyIdentifier": { - "target": "com.amazonaws.datazone#KmsKeyArn", + "description": { + "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "The identifier of the Amazon Web Services Key Management Service (KMS) key that is used\n to encrypt the Amazon DataZone domain, metadata, and reporting data.
" + "smithy.api#documentation": "The description of the data product revision.
" } }, - "status": { - "target": "com.amazonaws.datazone#DomainStatus", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "The status of the Amazon DataZone domain.
" + "smithy.api#documentation": "The glossary terms of the data product revision.
" } }, - "portalUrl": { - "target": "smithy.api#String", + "items": { + "target": "com.amazonaws.datazone#DataProductItems", "traits": { - "smithy.api#documentation": "The URL of the data portal for this Amazon DataZone domain.
" + "smithy.api#documentation": "The data assets of the data product revision.
" } }, - "tags": { - "target": "com.amazonaws.datazone#Tags", + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", "traits": { - "smithy.api#documentation": "The tags specified for the Amazon DataZone domain.
" + "smithy.api#documentation": "The metadata forms of the data product revision.
" } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.datazone#CreateEnvironment": { - "type": "operation", - "input": { - "target": "com.amazonaws.datazone#CreateEnvironmentInput" - }, - "output": { - "target": "com.amazonaws.datazone#CreateEnvironmentOutput" - }, - "errors": [ - { - "target": "com.amazonaws.datazone#AccessDeniedException" - }, - { - "target": "com.amazonaws.datazone#ConflictException" }, - { - "target": "com.amazonaws.datazone#InternalServerException" + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp at which the data product revision is created.
" + } }, - { - "target": "com.amazonaws.datazone#ResourceNotFoundException" + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "The user who created the data product revision.
" + } }, - { - "target": "com.amazonaws.datazone#ThrottlingException" + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp at which the first revision of the data product is created.
" + } }, - { - "target": "com.amazonaws.datazone#ValidationException" + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "The user who created the first revision of the data product.
" + } } - ], + }, "traits": { - "smithy.api#documentation": "Create an Amazon DataZone environment.
", - "smithy.api#http": { - "code": 201, - "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/environments" - } + "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateEnvironmentAction": { + "com.amazonaws.datazone#CreateDataSource": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateEnvironmentActionInput" + "target": "com.amazonaws.datazone#CreateDataSourceInput" }, "output": { - "target": "com.amazonaws.datazone#CreateEnvironmentActionOutput" + "target": "com.amazonaws.datazone#CreateDataSourceOutput" }, "errors": [ { @@ -2700,6 +2949,9 @@ { "target": "com.amazonaws.datazone#ResourceNotFoundException" }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.datazone#ThrottlingException" }, @@ -2708,316 +2960,236 @@ } ], "traits": { - "smithy.api#documentation": "Creates an action for the environment, for example, creates a console link for an\n analytics tool that is available in this environment.
", + "smithy.api#documentation": "Creates an Amazon DataZone data source.
", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions" - } + "uri": "/v2/domains/{domainIdentifier}/data-sources" + }, + "smithy.api#idempotent": {} } }, - "com.amazonaws.datazone#CreateEnvironmentActionInput": { + "com.amazonaws.datazone#CreateDataSourceInput": { "type": "structure", "members": { + "name": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "The name of the data source.
", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "The description of the data source.
" + } + }, "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the environment action is created.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain where the data source is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "environmentIdentifier": { - "target": "com.amazonaws.datazone#EnvironmentId", + "projectIdentifier": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The ID of the environment in which the environment action is created.
", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "The identifier of the Amazon DataZone project in which you want to add this data\n source.
", "smithy.api#required": {} } }, - "name": { + "environmentIdentifier": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The name of the environment action.
", + "smithy.api#documentation": "The unique identifier of the Amazon DataZone environment to which the data source publishes\n assets.
", "smithy.api#required": {} } }, - "parameters": { - "target": "com.amazonaws.datazone#ActionParameters", + "type": { + "target": "com.amazonaws.datazone#DataSourceType", "traits": { - "smithy.api#documentation": "The parameters of the environment action.
", + "smithy.api#documentation": "The type of the data source.
", "smithy.api#required": {} } }, - "description": { - "target": "smithy.api#String", + "configuration": { + "target": "com.amazonaws.datazone#DataSourceConfigurationInput", "traits": { - "smithy.api#documentation": "The description of the environment action that is being created in the\n environment.
" + "smithy.api#documentation": "Specifies the configuration of the data source. It can be set to either\n glueRunConfiguration
or redshiftRunConfiguration
.
The ID of the domain in which the environment action is created.
", - "smithy.api#required": {} + "smithy.api#documentation": "Specifies whether the business name generation is to be enabled for this data\n source.
" } }, - "environmentId": { - "target": "com.amazonaws.datazone#EnvironmentId", + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting", "traits": { - "smithy.api#documentation": "The ID of the environment in which the environment is created.
", - "smithy.api#required": {} + "smithy.api#documentation": "Specifies whether the data source is enabled.
" } }, - "id": { - "target": "com.amazonaws.datazone#EnvironmentActionId", + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration", "traits": { - "smithy.api#documentation": "The ID of the environment action.
", - "smithy.api#required": {} + "smithy.api#documentation": "The schedule of the data source runs.
" } }, - "name": { - "target": "smithy.api#String", + "publishOnImport": { + "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "The name of the environment action.
", - "smithy.api#required": {} + "smithy.api#documentation": "Specifies whether the assets that this data source creates in the inventory are to be\n also automatically published to the catalog.
" } }, - "parameters": { - "target": "com.amazonaws.datazone#ActionParameters", + "assetFormsInput": { + "target": "com.amazonaws.datazone#FormInputList", "traits": { - "smithy.api#documentation": "The parameters of the environment action.
", - "smithy.api#required": {} + "smithy.api#documentation": "The metadata forms that are to be attached to the assets that this data source works\n with.
" } }, - "description": { + "clientToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The description of the environment action.
" + "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", + "smithy.api#idempotencyToken": {} } } }, "traits": { - "smithy.api#output": {} + "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateEnvironmentInput": { + "com.amazonaws.datazone#CreateDataSourceOutput": { "type": "structure", "members": { - "projectIdentifier": { - "target": "com.amazonaws.datazone#ProjectId", + "id": { + "target": "com.amazonaws.datazone#DataSourceId", "traits": { - "smithy.api#documentation": "The identifier of the Amazon DataZone project in which this environment is created.
", + "smithy.api#documentation": "The unique identifier of the data source.
", "smithy.api#required": {} } }, - "domainIdentifier": { - "target": "com.amazonaws.datazone#DomainId", + "status": { + "target": "com.amazonaws.datazone#DataSourceStatus", "traits": { - "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which the environment is created.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#documentation": "The status of the data source.
" } }, - "description": { - "target": "smithy.api#String", + "type": { + "target": "com.amazonaws.datazone#DataSourceType", "traits": { - "smithy.api#documentation": "The description of the Amazon DataZone environment.
" + "smithy.api#documentation": "The type of the data source.
" } }, "name": { - "target": "smithy.api#String", + "target": "com.amazonaws.datazone#Name", "traits": { - "smithy.api#documentation": "The name of the Amazon DataZone environment.
", + "smithy.api#documentation": "The name of the data source.
", "smithy.api#required": {} } }, - "environmentProfileIdentifier": { - "target": "com.amazonaws.datazone#EnvironmentProfileId", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "The identifier of the environment profile that is used to create this Amazon DataZone\n environment.
", + "smithy.api#documentation": "The description of the data source.
" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the data source is created.
", "smithy.api#required": {} } }, - "userParameters": { - "target": "com.amazonaws.datazone#EnvironmentParametersList", + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The user parameters of this Amazon DataZone environment.
" + "smithy.api#documentation": "The ID of the Amazon DataZone project to which the data source is added.
", + "smithy.api#required": {} } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "The glossary terms that can be used in this Amazon DataZone environment.
" + "smithy.api#documentation": "The unique identifier of the Amazon DataZone environment to which the data source publishes\n assets.
", + "smithy.api#required": {} } }, - "environmentAccountIdentifier": { - "target": "smithy.api#String", + "configuration": { + "target": "com.amazonaws.datazone#DataSourceConfigurationOutput", "traits": { - "smithy.api#documentation": "The ID of the account in which the environment is being created.
" + "smithy.api#documentation": "Specifies the configuration of the data source. It can be set to either\n glueRunConfiguration
or redshiftRunConfiguration
.
The region of the account in which the environment is being created.
" + "smithy.api#documentation": "Specifies whether the business name generation is to be enabled for this data\n source.
" } }, - "environmentBlueprintIdentifier": { - "target": "smithy.api#String", + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting", "traits": { - "smithy.api#documentation": "The ID of the blueprint with which the environment is being created.
" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.datazone#CreateEnvironmentOutput": { - "type": "structure", - "members": { - "projectId": { - "target": "com.amazonaws.datazone#ProjectId", - "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone project in which this environment is created.
", - "smithy.api#required": {} - } - }, - "id": { - "target": "com.amazonaws.datazone#EnvironmentId", - "traits": { - "smithy.api#documentation": "The ID of this Amazon DataZone environment.
" - } - }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", - "traits": { - "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which the environment is created.
", - "smithy.api#required": {} - } - }, - "createdBy": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The Amazon DataZone user who created this environment.
", - "smithy.api#required": {} - } - }, - "createdAt": { - "target": "smithy.api#Timestamp", - "traits": { - "smithy.api#documentation": "The timestamp of when the environment was created.
", - "smithy.api#timestampFormat": "date-time" - } - }, - "updatedAt": { - "target": "smithy.api#Timestamp", - "traits": { - "smithy.api#documentation": "The timestamp of when this environment was updated.
", - "smithy.api#timestampFormat": "date-time" - } - }, - "name": { - "target": "com.amazonaws.datazone#EnvironmentName", - "traits": { - "smithy.api#documentation": "The name of this environment.
", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "The description of this Amazon DataZone environment.
" - } - }, - "environmentProfileId": { - "target": "com.amazonaws.datazone#EnvironmentProfileId", - "traits": { - "smithy.api#addedDefault": {}, - "smithy.api#default": "", - "smithy.api#documentation": "The ID of the environment profile with which this Amazon DataZone environment was\n created.
" - } - }, - "awsAccountId": { - "target": "com.amazonaws.datazone#AwsAccountId", - "traits": { - "smithy.api#documentation": "The Amazon Web Services account in which the Amazon DataZone environment is created.
" - } - }, - "awsAccountRegion": { - "target": "com.amazonaws.datazone#AwsRegion", - "traits": { - "smithy.api#documentation": "The Amazon Web Services region in which the Amazon DataZone environment is created.
" - } - }, - "provider": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The provider of this Amazon DataZone environment.
", - "smithy.api#required": {} + "smithy.api#documentation": "Specifies whether the data source is enabled.
" } }, - "provisionedResources": { - "target": "com.amazonaws.datazone#ResourceList", + "publishOnImport": { + "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "The provisioned resources of this Amazon DataZone environment.
" + "smithy.api#documentation": "Specifies whether the assets that this data source creates in the inventory are to be\n also automatically published to the catalog.
" } }, - "status": { - "target": "com.amazonaws.datazone#EnvironmentStatus", + "assetFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", "traits": { - "smithy.api#documentation": "The status of this Amazon DataZone environment.
" + "smithy.api#documentation": "The metadata forms attached to the assets that this data source creates.
" } }, - "environmentActions": { - "target": "com.amazonaws.datazone#EnvironmentActionList", + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration", "traits": { - "smithy.api#documentation": "The configurable actions of this Amazon DataZone environment.
" + "smithy.api#documentation": "The schedule of the data source runs.
" } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "lastRunStatus": { + "target": "com.amazonaws.datazone#DataSourceRunStatus", "traits": { - "smithy.api#documentation": "The glossary terms that can be used in this Amazon DataZone environment.
" + "smithy.api#documentation": "The status of the last run of this data source.
" } }, - "userParameters": { - "target": "com.amazonaws.datazone#CustomParameterList", + "lastRunAt": { + "target": "com.amazonaws.datazone#DateTime", "traits": { - "smithy.api#documentation": "The user parameters of this Amazon DataZone environment.
" + "smithy.api#documentation": "The timestamp that specifies when the data source was last run.
" } }, - "lastDeployment": { - "target": "com.amazonaws.datazone#Deployment", + "lastRunErrorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", "traits": { - "smithy.api#documentation": "The details of the last deployment of this Amazon DataZone environment.
" + "smithy.api#documentation": "Specifies the error message that is returned if the operation cannot be successfully\n completed.
" } }, - "provisioningProperties": { - "target": "com.amazonaws.datazone#ProvisioningProperties", + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", "traits": { - "smithy.api#documentation": "The provisioning properties of this Amazon DataZone environment.
" + "smithy.api#documentation": "Specifies the error message that is returned if the operation cannot be successfully\n completed.
" } }, - "deploymentProperties": { - "target": "com.amazonaws.datazone#DeploymentProperties", + "createdAt": { + "target": "com.amazonaws.datazone#DateTime", "traits": { - "smithy.api#documentation": "The deployment properties of this Amazon DataZone environment.
" + "smithy.api#documentation": "The timestamp of when the data source was created.
" } }, - "environmentBlueprintId": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime", "traits": { - "smithy.api#documentation": "The ID of the blueprint with which this Amazon DataZone environment was created.
" + "smithy.api#documentation": "The timestamp of when the data source was updated.
" } } }, @@ -3025,13 +3197,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateEnvironmentProfile": { + "com.amazonaws.datazone#CreateDomain": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateEnvironmentProfileInput" + "target": "com.amazonaws.datazone#CreateDomainInput" }, "output": { - "target": "com.amazonaws.datazone#CreateEnvironmentProfileOutput" + "target": "com.amazonaws.datazone#CreateDomainOutput" }, "errors": [ { @@ -3057,68 +3229,64 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Amazon DataZone environment profile.
", + "smithy.api#documentation": "Creates an Amazon DataZone domain.
", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/environment-profiles" - } + "uri": "/v2/domains" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] } }, - "com.amazonaws.datazone#CreateEnvironmentProfileInput": { + "com.amazonaws.datazone#CreateDomainInput": { "type": "structure", "members": { - "domainIdentifier": { - "target": "com.amazonaws.datazone#DomainId", - "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this environment profile is created.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, "name": { - "target": "com.amazonaws.datazone#EnvironmentProfileName", + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The name of this Amazon DataZone environment profile.
", + "smithy.api#documentation": "The name of the Amazon DataZone domain.
", "smithy.api#required": {} } }, "description": { - "target": "com.amazonaws.datazone#Description", + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The description of this Amazon DataZone environment profile.
" + "smithy.api#documentation": "The description of the Amazon DataZone domain.
" } }, - "environmentBlueprintIdentifier": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "singleSignOn": { + "target": "com.amazonaws.datazone#SingleSignOn", "traits": { - "smithy.api#documentation": "The ID of the blueprint with which this environment profile is created.
", - "smithy.api#required": {} + "smithy.api#documentation": "The single-sign on configuration of the Amazon DataZone domain.
" } }, - "projectIdentifier": { - "target": "com.amazonaws.datazone#ProjectId", + "domainExecutionRole": { + "target": "com.amazonaws.datazone#RoleArn", "traits": { - "smithy.api#documentation": "The identifier of the project in which to create the environment profile.
", + "smithy.api#documentation": "The domain execution role that is created when an Amazon DataZone domain is created. The\n domain execution role is created in the Amazon Web Services account that houses the\n Amazon DataZone domain.
", "smithy.api#required": {} } }, - "userParameters": { - "target": "com.amazonaws.datazone#EnvironmentParametersList", + "kmsKeyIdentifier": { + "target": "com.amazonaws.datazone#KmsKeyArn", "traits": { - "smithy.api#documentation": "The user parameters of this Amazon DataZone environment profile.
" + "smithy.api#documentation": "The identifier of the Amazon Web Services Key Management Service (KMS) key that is used\n to encrypt the Amazon DataZone domain, metadata, and reporting data.
" } }, - "awsAccountId": { - "target": "com.amazonaws.datazone#AwsAccountId", + "tags": { + "target": "com.amazonaws.datazone#Tags", "traits": { - "smithy.api#documentation": "The Amazon Web Services account in which the Amazon DataZone environment is created.
" + "smithy.api#documentation": "The tags specified for the Amazon DataZone domain.
" } }, - "awsAccountRegion": { - "target": "com.amazonaws.datazone#AwsRegion", + "clientToken": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The Amazon Web Services region in which this environment profile is created.
" + "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", + "smithy.api#idempotencyToken": {} } } }, @@ -3126,86 +3294,68 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateEnvironmentProfileOutput": { + "com.amazonaws.datazone#CreateDomainOutput": { "type": "structure", "members": { "id": { - "target": "com.amazonaws.datazone#EnvironmentProfileId", + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of this Amazon DataZone environment profile.
", + "smithy.api#documentation": "The identifier of the Amazon DataZone domain.
", "smithy.api#required": {} } }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "name": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this environment profile is created.
", - "smithy.api#required": {} + "smithy.api#documentation": "The name of the Amazon DataZone domain.
" } }, - "awsAccountId": { - "target": "com.amazonaws.datazone#AwsAccountId", + "description": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The Amazon Web Services account ID in which this Amazon DataZone environment profile is\n created.
" + "smithy.api#documentation": "The description of the Amazon DataZone domain.
" } }, - "awsAccountRegion": { - "target": "com.amazonaws.datazone#AwsRegion", + "singleSignOn": { + "target": "com.amazonaws.datazone#SingleSignOn", "traits": { - "smithy.api#documentation": "The Amazon Web Services region in which this Amazon DataZone environment profile is\n created.
" + "smithy.api#documentation": "The single-sign on configuration of the Amazon DataZone domain.
" } }, - "createdBy": { - "target": "smithy.api#String", + "domainExecutionRole": { + "target": "com.amazonaws.datazone#RoleArn", "traits": { - "smithy.api#documentation": "The Amazon DataZone user who created this environment profile.
", - "smithy.api#required": {} + "smithy.api#documentation": "The domain execution role that is created when an Amazon DataZone domain is created. The\n domain execution role is created in the Amazon Web Services account that houses the\n Amazon DataZone domain.
" } }, - "createdAt": { - "target": "smithy.api#Timestamp", + "arn": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The timestamp of when this environment profile was created.
", - "smithy.api#timestampFormat": "date-time" + "smithy.api#documentation": "The ARN of the Amazon DataZone domain.
" } }, - "updatedAt": { - "target": "smithy.api#Timestamp", + "kmsKeyIdentifier": { + "target": "com.amazonaws.datazone#KmsKeyArn", "traits": { - "smithy.api#documentation": "The timestamp of when this environment profile was updated.
", - "smithy.api#timestampFormat": "date-time" + "smithy.api#documentation": "The identifier of the Amazon Web Services Key Management Service (KMS) key that is used\n to encrypt the Amazon DataZone domain, metadata, and reporting data.
" } }, - "name": { - "target": "com.amazonaws.datazone#EnvironmentProfileName", - "traits": { - "smithy.api#documentation": "The name of this Amazon DataZone environment profile.
", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "The description of this Amazon DataZone environment profile.
" - } - }, - "environmentBlueprintId": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "status": { + "target": "com.amazonaws.datazone#DomainStatus", "traits": { - "smithy.api#documentation": "The ID of the blueprint with which this environment profile is created.
", - "smithy.api#required": {} + "smithy.api#documentation": "The status of the Amazon DataZone domain.
" } }, - "projectId": { - "target": "com.amazonaws.datazone#ProjectId", + "portalUrl": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone project in which this environment profile is created.
" + "smithy.api#documentation": "The URL of the data portal for this Amazon DataZone domain.
" } }, - "userParameters": { - "target": "com.amazonaws.datazone#CustomParameterList", + "tags": { + "target": "com.amazonaws.datazone#Tags", "traits": { - "smithy.api#documentation": "The user parameters of this Amazon DataZone environment profile.
" + "smithy.api#documentation": "The tags specified for the Amazon DataZone domain.
" } } }, @@ -3213,13 +3363,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateFormType": { + "com.amazonaws.datazone#CreateEnvironment": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateFormTypeInput" + "target": "com.amazonaws.datazone#CreateEnvironmentInput" }, "output": { - "target": "com.amazonaws.datazone#CreateFormTypeOutput" + "target": "com.amazonaws.datazone#CreateEnvironmentOutput" }, "errors": [ { @@ -3232,7 +3382,7 @@ "target": "com.amazonaws.datazone#InternalServerException" }, { - "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + "target": "com.amazonaws.datazone#ResourceNotFoundException" }, { "target": "com.amazonaws.datazone#ThrottlingException" @@ -3242,56 +3392,88 @@ } ], "traits": { - "smithy.api#documentation": "Creates a metadata form type.
", + "smithy.api#documentation": "Create an Amazon DataZone environment.
", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/form-types" + "uri": "/v2/domains/{domainIdentifier}/environments" } } }, - "com.amazonaws.datazone#CreateFormTypeInput": { + "com.amazonaws.datazone#CreateEnvironmentAction": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateEnvironmentActionInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateEnvironmentActionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Creates an action for the environment, for example, creates a console link for an\n analytics tool that is available in this environment.
", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions" + } + } + }, + "com.amazonaws.datazone#CreateEnvironmentActionInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this metadata form type is created.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the environment action is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#FormTypeName", + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "The name of this Amazon DataZone metadata form type.
", + "smithy.api#documentation": "The ID of the environment in which the environment action is created.
", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "model": { - "target": "com.amazonaws.datazone#Model", + "name": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The model of this Amazon DataZone metadata form type.
", + "smithy.api#documentation": "The name of the environment action.
", "smithy.api#required": {} } }, - "owningProjectIdentifier": { - "target": "com.amazonaws.datazone#ProjectId", + "parameters": { + "target": "com.amazonaws.datazone#ActionParameters", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone project that owns this metadata form type.
", + "smithy.api#documentation": "The parameters of the environment action.
", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#FormTypeStatus", - "traits": { - "smithy.api#documentation": "The status of this Amazon DataZone metadata form type.
" - } - }, "description": { - "target": "com.amazonaws.datazone#Description", + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The description of this Amazon DataZone metadata form type.
" + "smithy.api#documentation": "The description of the environment action that is being created in the\n environment.
" } } }, @@ -3299,52 +3481,48 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateFormTypeOutput": { + "com.amazonaws.datazone#CreateEnvironmentActionOutput": { "type": "structure", "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this metadata form type is created.
", + "smithy.api#documentation": "The ID of the domain in which the environment action is created.
", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#FormTypeName", + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "The name of this Amazon DataZone metadata form type.
", + "smithy.api#documentation": "The ID of the environment in which the environment is created.
", "smithy.api#required": {} } }, - "revision": { - "target": "com.amazonaws.datazone#Revision", + "id": { + "target": "com.amazonaws.datazone#EnvironmentActionId", "traits": { - "smithy.api#documentation": "The revision of this Amazon DataZone metadata form type.
", + "smithy.api#documentation": "The ID of the environment action.
", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "The description of this Amazon DataZone metadata form type.
" - } - }, - "owningProjectId": { - "target": "com.amazonaws.datazone#ProjectId", + "name": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The ID of the project that owns this Amazon DataZone metadata form type.
" + "smithy.api#documentation": "The name of the environment action.
", + "smithy.api#required": {} } }, - "originDomainId": { - "target": "com.amazonaws.datazone#DomainId", + "parameters": { + "target": "com.amazonaws.datazone#ActionParameters", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this metadata form type was originally\n created.
" + "smithy.api#documentation": "The parameters of the environment action.
", + "smithy.api#required": {} } }, - "originProjectId": { - "target": "com.amazonaws.datazone#ProjectId", + "description": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The ID of the project in which this Amazon DataZone metadata form type was originally\n created.
" + "smithy.api#documentation": "The description of the environment action.
" } } }, @@ -3352,86 +3530,72 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateGlossary": { - "type": "operation", - "input": { - "target": "com.amazonaws.datazone#CreateGlossaryInput" - }, - "output": { - "target": "com.amazonaws.datazone#CreateGlossaryOutput" - }, - "errors": [ - { - "target": "com.amazonaws.datazone#AccessDeniedException" - }, - { - "target": "com.amazonaws.datazone#ConflictException" - }, - { - "target": "com.amazonaws.datazone#InternalServerException" - }, - { - "target": "com.amazonaws.datazone#ServiceQuotaExceededException" - }, - { - "target": "com.amazonaws.datazone#ThrottlingException" - }, - { - "target": "com.amazonaws.datazone#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "Creates an Amazon DataZone business glossary.
", - "smithy.api#http": { - "code": 201, - "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/glossaries" - }, - "smithy.api#idempotent": {} - } - }, - "com.amazonaws.datazone#CreateGlossaryInput": { + "com.amazonaws.datazone#CreateEnvironmentInput": { "type": "structure", "members": { + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "The identifier of the Amazon DataZone project in which this environment is created.
", + "smithy.api#required": {} + } + }, "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this business glossary is created.
", + "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which the environment is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The description of the Amazon DataZone environment.
" + } + }, "name": { - "target": "com.amazonaws.datazone#GlossaryName", + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The name of this business glossary.
", + "smithy.api#documentation": "The name of the Amazon DataZone environment.
", "smithy.api#required": {} } }, - "owningProjectIdentifier": { - "target": "com.amazonaws.datazone#ProjectId", + "environmentProfileIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", "traits": { - "smithy.api#documentation": "The ID of the project that currently owns business glossary.
", + "smithy.api#documentation": "The identifier of the environment profile that is used to create this Amazon DataZone\n environment.
", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#GlossaryDescription", + "userParameters": { + "target": "com.amazonaws.datazone#EnvironmentParametersList", "traits": { - "smithy.api#documentation": "The description of this business glossary.
" + "smithy.api#documentation": "The user parameters of this Amazon DataZone environment.
" } }, - "status": { - "target": "com.amazonaws.datazone#GlossaryStatus", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "The status of this business glossary.
" + "smithy.api#documentation": "The glossary terms that can be used in this Amazon DataZone environment.
" } }, - "clientToken": { - "target": "com.amazonaws.datazone#ClientToken", + "environmentAccountIdentifier": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", - "smithy.api#idempotencyToken": {} + "smithy.api#documentation": "The ID of the account in which the environment is being created.
" + } + }, + "environmentAccountRegion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The region of the account in which the environment is being created.
" + } + }, + "environmentBlueprintIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ID of the blueprint with which the environment is being created.
" } } }, @@ -3439,47 +3603,142 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateGlossaryOutput": { + "com.amazonaws.datazone#CreateEnvironmentOutput": { "type": "structure", "members": { - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this business glossary is created.
", + "smithy.api#documentation": "The ID of the Amazon DataZone project in which this environment is created.
", "smithy.api#required": {} } }, "id": { - "target": "com.amazonaws.datazone#GlossaryId", + "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "The ID of this business glossary.
", + "smithy.api#documentation": "The ID of this Amazon DataZone environment.
" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which the environment is created.
", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#GlossaryName", + "createdBy": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The name of this business glossary.
", + "smithy.api#documentation": "The Amazon DataZone user who created this environment.
", "smithy.api#required": {} } }, - "owningProjectId": { - "target": "com.amazonaws.datazone#ProjectId", + "createdAt": { + "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "The ID of the project that currently owns this business glossary.
", + "smithy.api#documentation": "The timestamp of when the environment was created.
", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp of when this environment was updated.
", + "smithy.api#timestampFormat": "date-time" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentName", + "traits": { + "smithy.api#documentation": "The name of this environment.
", "smithy.api#required": {} } }, "description": { - "target": "com.amazonaws.datazone#GlossaryDescription", + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "The description of this business glossary.
" + "smithy.api#documentation": "The description of this Amazon DataZone environment.
" + } + }, + "environmentProfileId": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "The ID of the environment profile with which this Amazon DataZone environment was\n created.
" + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "The Amazon Web Services account in which the Amazon DataZone environment is created.
" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "The Amazon Web Services region in which the Amazon DataZone environment is created.
" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The provider of this Amazon DataZone environment.
", + "smithy.api#required": {} + } + }, + "provisionedResources": { + "target": "com.amazonaws.datazone#ResourceList", + "traits": { + "smithy.api#documentation": "The provisioned resources of this Amazon DataZone environment.
" } }, "status": { - "target": "com.amazonaws.datazone#GlossaryStatus", + "target": "com.amazonaws.datazone#EnvironmentStatus", "traits": { - "smithy.api#documentation": "The status of this business glossary.
" + "smithy.api#documentation": "The status of this Amazon DataZone environment.
" + } + }, + "environmentActions": { + "target": "com.amazonaws.datazone#EnvironmentActionList", + "traits": { + "smithy.api#documentation": "The configurable actions of this Amazon DataZone environment.
" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "The glossary terms that can be used in this Amazon DataZone environment.
" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", + "traits": { + "smithy.api#documentation": "The user parameters of this Amazon DataZone environment.
" + } + }, + "lastDeployment": { + "target": "com.amazonaws.datazone#Deployment", + "traits": { + "smithy.api#documentation": "The details of the last deployment of this Amazon DataZone environment.
" + } + }, + "provisioningProperties": { + "target": "com.amazonaws.datazone#ProvisioningProperties", + "traits": { + "smithy.api#documentation": "The provisioning properties of this Amazon DataZone environment.
" + } + }, + "deploymentProperties": { + "target": "com.amazonaws.datazone#DeploymentProperties", + "traits": { + "smithy.api#documentation": "The deployment properties of this Amazon DataZone environment.
" + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "The ID of the blueprint with which this Amazon DataZone environment was created.
" } } }, @@ -3487,13 +3746,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateGlossaryTerm": { + "com.amazonaws.datazone#CreateEnvironmentProfile": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateGlossaryTermInput" + "target": "com.amazonaws.datazone#CreateEnvironmentProfileInput" }, "output": { - "target": "com.amazonaws.datazone#CreateGlossaryTermOutput" + "target": "com.amazonaws.datazone#CreateEnvironmentProfileOutput" }, "errors": [ { @@ -3519,69 +3778,68 @@ } ], "traits": { - "smithy.api#documentation": "Creates a business glossary term.
", + "smithy.api#documentation": "Creates an Amazon DataZone environment profile.
", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/glossary-terms" - }, - "smithy.api#idempotent": {} + "uri": "/v2/domains/{domainIdentifier}/environment-profiles" + } } }, - "com.amazonaws.datazone#CreateGlossaryTermInput": { + "com.amazonaws.datazone#CreateEnvironmentProfileInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this business glossary term is created.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this environment profile is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "glossaryIdentifier": { - "target": "com.amazonaws.datazone#GlossaryTermId", + "name": { + "target": "com.amazonaws.datazone#EnvironmentProfileName", "traits": { - "smithy.api#documentation": "The ID of the business glossary in which this term is created.
", + "smithy.api#documentation": "The name of this Amazon DataZone environment profile.
", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#GlossaryTermName", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "The name of this business glossary term.
", - "smithy.api#required": {} + "smithy.api#documentation": "The description of this Amazon DataZone environment profile.
" } }, - "status": { - "target": "com.amazonaws.datazone#GlossaryTermStatus", + "environmentBlueprintIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", "traits": { - "smithy.api#documentation": "The status of this business glossary term.
" + "smithy.api#documentation": "The ID of the blueprint with which this environment profile is created.
", + "smithy.api#required": {} } }, - "shortDescription": { - "target": "com.amazonaws.datazone#ShortDescription", + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The short description of this business glossary term.
" + "smithy.api#documentation": "The identifier of the project in which to create the environment profile.
", + "smithy.api#required": {} } }, - "longDescription": { - "target": "com.amazonaws.datazone#LongDescription", + "userParameters": { + "target": "com.amazonaws.datazone#EnvironmentParametersList", "traits": { - "smithy.api#documentation": "The long description of this business glossary term.
" + "smithy.api#documentation": "The user parameters of this Amazon DataZone environment profile.
" } }, - "termRelations": { - "target": "com.amazonaws.datazone#TermRelations", + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", "traits": { - "smithy.api#documentation": "The term relations of this business glossary term.
" + "smithy.api#documentation": "The Amazon Web Services account in which the Amazon DataZone environment is created.
" } }, - "clientToken": { - "target": "com.amazonaws.datazone#ClientToken", + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", "traits": { - "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", - "smithy.api#idempotencyToken": {} + "smithy.api#documentation": "The Amazon Web Services region in which this environment profile is created.
" } } }, @@ -3589,60 +3847,86 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateGlossaryTermOutput": { + "com.amazonaws.datazone#CreateEnvironmentProfileOutput": { "type": "structure", "members": { "id": { - "target": "com.amazonaws.datazone#GlossaryTermId", + "target": "com.amazonaws.datazone#EnvironmentProfileId", "traits": { - "smithy.api#documentation": "The ID of this business glossary term.
", + "smithy.api#documentation": "The ID of this Amazon DataZone environment profile.
", "smithy.api#required": {} } }, "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this business glossary term is created.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this environment profile is created.
", "smithy.api#required": {} } }, - "glossaryId": { - "target": "com.amazonaws.datazone#GlossaryId", + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", "traits": { - "smithy.api#documentation": "The ID of the business glossary in which this term is created.
", + "smithy.api#documentation": "The Amazon Web Services account ID in which this Amazon DataZone environment profile is\n created.
" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "The Amazon Web Services region in which this Amazon DataZone environment profile is\n created.
" + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The Amazon DataZone user who created this environment profile.
", "smithy.api#required": {} } }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp of when this environment profile was created.
", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp of when this environment profile was updated.
", + "smithy.api#timestampFormat": "date-time" + } + }, "name": { - "target": "com.amazonaws.datazone#GlossaryTermName", + "target": "com.amazonaws.datazone#EnvironmentProfileName", "traits": { - "smithy.api#documentation": "The name of this business glossary term.
", + "smithy.api#documentation": "The name of this Amazon DataZone environment profile.
", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#GlossaryTermStatus", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "The status of this business glossary term.
", - "smithy.api#required": {} + "smithy.api#documentation": "The description of this Amazon DataZone environment profile.
" } }, - "shortDescription": { - "target": "com.amazonaws.datazone#ShortDescription", + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", "traits": { - "smithy.api#documentation": "The short description of this business glossary term.
" + "smithy.api#documentation": "The ID of the blueprint with which this environment profile is created.
", + "smithy.api#required": {} } }, - "longDescription": { - "target": "com.amazonaws.datazone#LongDescription", + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The long description of this business glossary term.
" + "smithy.api#documentation": "The ID of the Amazon DataZone project in which this environment profile is created.
" } }, - "termRelations": { - "target": "com.amazonaws.datazone#TermRelations", + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", "traits": { - "smithy.api#documentation": "The term relations of this business glossary term.
" + "smithy.api#documentation": "The user parameters of this Amazon DataZone environment profile.
" } } }, @@ -3650,64 +3934,85 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateGroupProfile": { + "com.amazonaws.datazone#CreateFormType": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateGroupProfileInput" + "target": "com.amazonaws.datazone#CreateFormTypeInput" }, "output": { - "target": "com.amazonaws.datazone#CreateGroupProfileOutput" + "target": "com.amazonaws.datazone#CreateFormTypeOutput" }, "errors": [ { "target": "com.amazonaws.datazone#AccessDeniedException" }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, { "target": "com.amazonaws.datazone#InternalServerException" }, { - "target": "com.amazonaws.datazone#ResourceNotFoundException" + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" }, { "target": "com.amazonaws.datazone#ValidationException" } ], "traits": { - "smithy.api#documentation": "Creates a group profile in Amazon DataZone.
", + "smithy.api#documentation": "Creates a metadata form type.
", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/group-profiles" - }, - "smithy.api#idempotent": {}, - "smithy.api#tags": [ - "Administration" - ] + "uri": "/v2/domains/{domainIdentifier}/form-types" + } } }, - "com.amazonaws.datazone#CreateGroupProfileInput": { + "com.amazonaws.datazone#CreateFormTypeInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which the group profile is created.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this metadata form type is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "groupIdentifier": { - "target": "com.amazonaws.datazone#GroupIdentifier", + "name": { + "target": "com.amazonaws.datazone#FormTypeName", "traits": { - "smithy.api#documentation": "The identifier of the group for which the group profile is created.
", + "smithy.api#documentation": "The name of this Amazon DataZone metadata form type.
", "smithy.api#required": {} } }, - "clientToken": { - "target": "smithy.api#String", + "model": { + "target": "com.amazonaws.datazone#Model", "traits": { - "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", - "smithy.api#idempotencyToken": {} + "smithy.api#documentation": "The model of this Amazon DataZone metadata form type.
", + "smithy.api#required": {} + } + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon DataZone project that owns this metadata form type.
", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#FormTypeStatus", + "traits": { + "smithy.api#documentation": "The status of this Amazon DataZone metadata form type.
" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "The description of this Amazon DataZone metadata form type.
" } } }, @@ -3715,31 +4020,52 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateGroupProfileOutput": { + "com.amazonaws.datazone#CreateFormTypeOutput": { "type": "structure", "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which the group profile is created.
" + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this metadata form type is created.
", + "smithy.api#required": {} } }, - "id": { - "target": "com.amazonaws.datazone#GroupProfileId", + "name": { + "target": "com.amazonaws.datazone#FormTypeName", "traits": { - "smithy.api#documentation": "The identifier of the group profile.
" + "smithy.api#documentation": "The name of this Amazon DataZone metadata form type.
", + "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#GroupProfileStatus", + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "The status of the group profile.
" + "smithy.api#documentation": "The revision of this Amazon DataZone metadata form type.
", + "smithy.api#required": {} } }, - "groupName": { - "target": "com.amazonaws.datazone#GroupProfileName", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "The name of the group for which group profile is created.
" + "smithy.api#documentation": "The description of this Amazon DataZone metadata form type.
" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "The ID of the project that owns this Amazon DataZone metadata form type.
" + } + }, + "originDomainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this metadata form type was originally\n created.
" + } + }, + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "The ID of the project in which this Amazon DataZone metadata form type was originally\n created.
" } } }, @@ -3747,13 +4073,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateListingChangeSet": { + "com.amazonaws.datazone#CreateGlossary": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateListingChangeSetInput" + "target": "com.amazonaws.datazone#CreateGlossaryInput" }, "output": { - "target": "com.amazonaws.datazone#CreateListingChangeSetOutput" + "target": "com.amazonaws.datazone#CreateGlossaryOutput" }, "errors": [ { @@ -3765,9 +4091,6 @@ { "target": "com.amazonaws.datazone#InternalServerException" }, - { - "target": "com.amazonaws.datazone#ResourceNotFoundException" - }, { "target": "com.amazonaws.datazone#ServiceQuotaExceededException" }, @@ -3779,50 +4102,50 @@ } ], "traits": { - "smithy.api#documentation": "Publishes a listing (a record of an asset at a given time) or removes a listing from the\n catalog.
", + "smithy.api#documentation": "Creates an Amazon DataZone business glossary.
", "smithy.api#http": { - "code": 200, + "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/listings/change-set" - } + "uri": "/v2/domains/{domainIdentifier}/glossaries" + }, + "smithy.api#idempotent": {} } }, - "com.amazonaws.datazone#CreateListingChangeSetInput": { + "com.amazonaws.datazone#CreateGlossaryInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this business glossary is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "entityIdentifier": { - "target": "com.amazonaws.datazone#EntityIdentifier", + "name": { + "target": "com.amazonaws.datazone#GlossaryName", "traits": { - "smithy.api#documentation": "The ID of the asset.
", + "smithy.api#documentation": "The name of this business glossary.
", "smithy.api#required": {} } }, - "entityType": { - "target": "com.amazonaws.datazone#EntityType", + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The type of an entity.
", + "smithy.api#documentation": "The ID of the project that currently owns business glossary.
", "smithy.api#required": {} } }, - "entityRevision": { - "target": "com.amazonaws.datazone#Revision", + "description": { + "target": "com.amazonaws.datazone#GlossaryDescription", "traits": { - "smithy.api#documentation": "The revision of an asset.
" + "smithy.api#documentation": "The description of this business glossary.
" } }, - "action": { - "target": "com.amazonaws.datazone#ChangeAction", + "status": { + "target": "com.amazonaws.datazone#GlossaryStatus", "traits": { - "smithy.api#documentation": "Specifies whether to publish or unpublish a listing.
", - "smithy.api#required": {} + "smithy.api#documentation": "The status of this business glossary.
" } }, "clientToken": { @@ -3837,42 +4160,61 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateListingChangeSetOutput": { + "com.amazonaws.datazone#CreateGlossaryOutput": { "type": "structure", "members": { - "listingId": { - "target": "com.amazonaws.datazone#ListingId", + "domainId": { + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the listing (a record of an asset at a given time).
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this business glossary is created.
", "smithy.api#required": {} } }, - "listingRevision": { - "target": "com.amazonaws.datazone#Revision", + "id": { + "target": "com.amazonaws.datazone#GlossaryId", "traits": { - "smithy.api#documentation": "The revision of a listing.
", + "smithy.api#documentation": "The ID of this business glossary.
", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#ListingStatus", + "name": { + "target": "com.amazonaws.datazone#GlossaryName", "traits": { - "smithy.api#documentation": "Specifies the status of the listing.
", + "smithy.api#documentation": "The name of this business glossary.
", + "smithy.api#required": {} + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "The ID of the project that currently owns this business glossary.
", "smithy.api#required": {} } + }, + "description": { + "target": "com.amazonaws.datazone#GlossaryDescription", + "traits": { + "smithy.api#documentation": "The description of this business glossary.
" + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryStatus", + "traits": { + "smithy.api#documentation": "The status of this business glossary.
" + } } }, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateProject": { + "com.amazonaws.datazone#CreateGlossaryTerm": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateProjectInput" + "target": "com.amazonaws.datazone#CreateGlossaryTermInput" }, "output": { - "target": "com.amazonaws.datazone#CreateProjectOutput" + "target": "com.amazonaws.datazone#CreateGlossaryTermOutput" }, "errors": [ { @@ -3898,42 +4240,69 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Amazon DataZone project.
", + "smithy.api#documentation": "Creates a business glossary term.
", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/projects" - } + "uri": "/v2/domains/{domainIdentifier}/glossary-terms" + }, + "smithy.api#idempotent": {} } }, - "com.amazonaws.datazone#CreateProjectInput": { + "com.amazonaws.datazone#CreateGlossaryTermInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this project is created.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this business glossary term is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, + "glossaryIdentifier": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "The ID of the business glossary in which this term is created.
", + "smithy.api#required": {} + } + }, "name": { - "target": "com.amazonaws.datazone#ProjectName", + "target": "com.amazonaws.datazone#GlossaryTermName", "traits": { - "smithy.api#documentation": "The name of the Amazon DataZone project.
", + "smithy.api#documentation": "The name of this business glossary term.
", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#Description", + "status": { + "target": "com.amazonaws.datazone#GlossaryTermStatus", "traits": { - "smithy.api#documentation": "The description of the Amazon DataZone project.
" + "smithy.api#documentation": "The status of this business glossary term.
" } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription", "traits": { - "smithy.api#documentation": "The glossary terms that can be used in this Amazon DataZone project.
" + "smithy.api#documentation": "The short description of this business glossary term.
" + } + }, + "longDescription": { + "target": "com.amazonaws.datazone#LongDescription", + "traits": { + "smithy.api#documentation": "The long description of this business glossary term.
" + } + }, + "termRelations": { + "target": "com.amazonaws.datazone#TermRelations", + "traits": { + "smithy.api#documentation": "The term relations of this business glossary term.
" + } + }, + "clientToken": { + "target": "com.amazonaws.datazone#ClientToken", + "traits": { + "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", + "smithy.api#idempotencyToken": {} } } }, @@ -3941,15 +4310,76 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateProjectMembership": { - "type": "operation", - "input": { - "target": "com.amazonaws.datazone#CreateProjectMembershipInput" - }, - "output": { - "target": "com.amazonaws.datazone#CreateProjectMembershipOutput" - }, - "errors": [ + "com.amazonaws.datazone#CreateGlossaryTermOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "The ID of this business glossary term.
", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this business glossary term is created.
", + "smithy.api#required": {} + } + }, + "glossaryId": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "The ID of the business glossary in which this term is created.
", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryTermName", + "traits": { + "smithy.api#documentation": "The name of this business glossary term.
", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryTermStatus", + "traits": { + "smithy.api#documentation": "The status of this business glossary term.
", + "smithy.api#required": {} + } + }, + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription", + "traits": { + "smithy.api#documentation": "The short description of this business glossary term.
" + } + }, + "longDescription": { + "target": "com.amazonaws.datazone#LongDescription", + "traits": { + "smithy.api#documentation": "The long description of this business glossary term.
" + } + }, + "termRelations": { + "target": "com.amazonaws.datazone#TermRelations", + "traits": { + "smithy.api#documentation": "The term relations of this business glossary term.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateGroupProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateGroupProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateGroupProfileOutput" + }, + "errors": [ { "target": "com.amazonaws.datazone#AccessDeniedException" }, @@ -3964,45 +4394,41 @@ } ], "traits": { - "smithy.api#documentation": "Creates a project membership in Amazon DataZone.
", + "smithy.api#documentation": "Creates a group profile in Amazon DataZone.
", "smithy.api#http": { "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/createMembership" - } + "uri": "/v2/domains/{domainIdentifier}/group-profiles" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] } }, - "com.amazonaws.datazone#CreateProjectMembershipInput": { + "com.amazonaws.datazone#CreateGroupProfileInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which project membership is created.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "projectIdentifier": { - "target": "com.amazonaws.datazone#ProjectId", - "traits": { - "smithy.api#documentation": "The ID of the project for which this project membership was created.
", + "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which the group profile is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "member": { - "target": "com.amazonaws.datazone#Member", + "groupIdentifier": { + "target": "com.amazonaws.datazone#GroupIdentifier", "traits": { - "smithy.api#documentation": "The project member whose project membership was created.
", + "smithy.api#documentation": "The identifier of the group for which the group profile is created.
", "smithy.api#required": {} } }, - "designation": { - "target": "com.amazonaws.datazone#UserDesignation", + "clientToken": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The designation of the project membership.
", - "smithy.api#required": {} + "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", + "smithy.api#idempotencyToken": {} } } }, @@ -4010,80 +4436,31 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateProjectMembershipOutput": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.datazone#CreateProjectOutput": { + "com.amazonaws.datazone#CreateGroupProfileOutput": { "type": "structure", "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which the project was created.
", - "smithy.api#required": {} + "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which the group profile is created.
" } }, "id": { - "target": "com.amazonaws.datazone#ProjectId", - "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone project.
", - "smithy.api#required": {} - } - }, - "name": { - "target": "com.amazonaws.datazone#ProjectName", - "traits": { - "smithy.api#documentation": "The name of the project.
", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "The description of the project.
" - } - }, - "projectStatus": { - "target": "com.amazonaws.datazone#ProjectStatus", - "traits": { - "smithy.api#documentation": "The status of the Amazon DataZone project that was created.
" - } - }, - "failureReasons": { - "target": "com.amazonaws.datazone#FailureReasons", - "traits": { - "smithy.api#documentation": "Specifies the error message that is returned if the operation cannot be successfully\n completed.
" - } - }, - "createdBy": { - "target": "com.amazonaws.datazone#CreatedBy", - "traits": { - "smithy.api#documentation": "The Amazon DataZone user who created the project.
", - "smithy.api#required": {} - } - }, - "createdAt": { - "target": "smithy.api#Timestamp", + "target": "com.amazonaws.datazone#GroupProfileId", "traits": { - "smithy.api#documentation": "The timestamp of when the project was created.
", - "smithy.api#timestampFormat": "date-time" + "smithy.api#documentation": "The identifier of the group profile.
" } }, - "lastUpdatedAt": { - "target": "smithy.api#Timestamp", + "status": { + "target": "com.amazonaws.datazone#GroupProfileStatus", "traits": { - "smithy.api#documentation": "The timestamp of when the project was last updated.
", - "smithy.api#timestampFormat": "date-time" + "smithy.api#documentation": "The status of the group profile.
" } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "groupName": { + "target": "com.amazonaws.datazone#GroupProfileName", "traits": { - "smithy.api#documentation": "The glossary terms that can be used in the project.
" + "smithy.api#documentation": "The name of the group for which group profile is created.
" } } }, @@ -4091,13 +4468,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateSubscriptionGrant": { + "com.amazonaws.datazone#CreateListingChangeSet": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateSubscriptionGrantInput" + "target": "com.amazonaws.datazone#CreateListingChangeSetInput" }, "output": { - "target": "com.amazonaws.datazone#CreateSubscriptionGrantOutput" + "target": "com.amazonaws.datazone#CreateListingChangeSetOutput" }, "errors": [ { @@ -4112,6 +4489,9 @@ { "target": "com.amazonaws.datazone#ResourceNotFoundException" }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.datazone#ThrottlingException" }, @@ -4120,54 +4500,54 @@ } ], "traits": { - "smithy.api#documentation": "Creates a subsscription grant in Amazon DataZone.
", + "smithy.api#documentation": "Publishes a listing (a record of an asset at a given time) or removes a listing from the\n catalog.
", "smithy.api#http": { "code": 200, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/subscription-grants" + "uri": "/v2/domains/{domainIdentifier}/listings/change-set" } } }, - "com.amazonaws.datazone#CreateSubscriptionGrantInput": { + "com.amazonaws.datazone#CreateListingChangeSetInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the subscription grant is created.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "environmentIdentifier": { - "target": "com.amazonaws.datazone#EnvironmentId", + "entityIdentifier": { + "target": "com.amazonaws.datazone#EntityIdentifier", "traits": { - "smithy.api#documentation": "The ID of the environment in which the subscription grant is created.
", + "smithy.api#documentation": "The ID of the asset.
", "smithy.api#required": {} } }, - "subscriptionTargetIdentifier": { - "target": "com.amazonaws.datazone#SubscriptionTargetId", + "entityType": { + "target": "com.amazonaws.datazone#EntityType", "traits": { - "smithy.api#documentation": "The ID of the subscription target for which the subscription grant is created.
", + "smithy.api#documentation": "The type of an entity.
", "smithy.api#required": {} } }, - "grantedEntity": { - "target": "com.amazonaws.datazone#GrantedEntityInput", + "entityRevision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "The entity to which the subscription is to be granted.
", - "smithy.api#required": {} + "smithy.api#documentation": "The revision of an asset.
" } }, - "assetTargetNames": { - "target": "com.amazonaws.datazone#AssetTargetNames", + "action": { + "target": "com.amazonaws.datazone#ChangeAction", "traits": { - "smithy.api#documentation": "The names of the assets for which the subscription grant is created.
" + "smithy.api#documentation": "Specifies whether to publish or unpublish a listing.
", + "smithy.api#required": {} } }, "clientToken": { - "target": "smithy.api#String", + "target": "com.amazonaws.datazone#ClientToken", "traits": { "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", "smithy.api#idempotencyToken": {} @@ -4178,95 +4558,42 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateSubscriptionGrantOutput": { + "com.amazonaws.datazone#CreateListingChangeSetOutput": { "type": "structure", "members": { - "id": { - "target": "com.amazonaws.datazone#SubscriptionGrantId", - "traits": { - "smithy.api#documentation": "The ID of the subscription grant.
", - "smithy.api#required": {} - } - }, - "createdBy": { - "target": "com.amazonaws.datazone#CreatedBy", + "listingId": { + "target": "com.amazonaws.datazone#ListingId", "traits": { - "smithy.api#documentation": "The Amazon DataZone user who created the subscription grant.
", + "smithy.api#documentation": "The ID of the listing (a record of an asset at a given time).
", "smithy.api#required": {} } }, - "updatedBy": { - "target": "com.amazonaws.datazone#UpdatedBy", - "traits": { - "smithy.api#documentation": "The Amazon DataZone user who updated the subscription grant.
" - } - }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the subscription grant is created.
", + "smithy.api#documentation": "The revision of a listing.
", "smithy.api#required": {} } }, - "createdAt": { - "target": "com.amazonaws.datazone#CreatedAt", + "status": { + "target": "com.amazonaws.datazone#ListingStatus", "traits": { - "smithy.api#documentation": "A timestamp of when the subscription grant is created.
", + "smithy.api#documentation": "Specifies the status of the listing.
", "smithy.api#required": {} } - }, - "updatedAt": { - "target": "com.amazonaws.datazone#UpdatedAt", - "traits": { - "smithy.api#documentation": "A timestamp of when the subscription grant was updated.
", - "smithy.api#required": {} - } - }, - "subscriptionTargetId": { - "target": "com.amazonaws.datazone#SubscriptionTargetId", - "traits": { - "smithy.api#documentation": "The ID of the subscription target for which the subscription grant is created.
", - "smithy.api#required": {} - } - }, - "grantedEntity": { - "target": "com.amazonaws.datazone#GrantedEntity", - "traits": { - "smithy.api#documentation": "The entity to which the subscription is granted.
", - "smithy.api#required": {} - } - }, - "status": { - "target": "com.amazonaws.datazone#SubscriptionGrantOverallStatus", - "traits": { - "smithy.api#documentation": "The status of the subscription grant.
", - "smithy.api#required": {} - } - }, - "assets": { - "target": "com.amazonaws.datazone#SubscribedAssets", - "traits": { - "smithy.api#documentation": "The assets for which the subscription grant is created.
" - } - }, - "subscriptionId": { - "target": "com.amazonaws.datazone#SubscriptionId", - "traits": { - "smithy.api#documentation": "The identifier of the subscription grant.
" - } } }, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateSubscriptionRequest": { + "com.amazonaws.datazone#CreateProject": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateSubscriptionRequestInput" + "target": "com.amazonaws.datazone#CreateProjectInput" }, "output": { - "target": "com.amazonaws.datazone#CreateSubscriptionRequestOutput" + "target": "com.amazonaws.datazone#CreateProjectOutput" }, "errors": [ { @@ -4281,6 +4608,9 @@ { "target": "com.amazonaws.datazone#ResourceNotFoundException" }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.datazone#ThrottlingException" }, @@ -4289,51 +4619,42 @@ } ], "traits": { - "smithy.api#documentation": "Creates a subscription request in Amazon DataZone.
", + "smithy.api#documentation": "Creates an Amazon DataZone project.
", "smithy.api#http": { - "code": 200, + "code": 201, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/subscription-requests" + "uri": "/v2/domains/{domainIdentifier}/projects" } } }, - "com.amazonaws.datazone#CreateSubscriptionRequestInput": { + "com.amazonaws.datazone#CreateProjectInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the subscription request is created.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this project is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "subscribedPrincipals": { - "target": "com.amazonaws.datazone#SubscribedPrincipalInputs", - "traits": { - "smithy.api#documentation": "The Amazon DataZone principals for whom the subscription request is created.
", - "smithy.api#required": {} - } - }, - "subscribedListings": { - "target": "com.amazonaws.datazone#SubscribedListingInputs", + "name": { + "target": "com.amazonaws.datazone#ProjectName", "traits": { - "smithy.api#documentation": "The published asset for which the subscription grant is to be created.
", + "smithy.api#documentation": "The name of the Amazon DataZone project.
", "smithy.api#required": {} } }, - "requestReason": { - "target": "com.amazonaws.datazone#RequestReason", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "The reason for the subscription request.
", - "smithy.api#required": {} + "smithy.api#documentation": "The description of the Amazon DataZone project.
" } }, - "clientToken": { - "target": "smithy.api#String", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", - "smithy.api#idempotencyToken": {} + "smithy.api#documentation": "The glossary terms that can be used in this Amazon DataZone project.
" } } }, @@ -4341,96 +4662,149 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateSubscriptionRequestOutput": { + "com.amazonaws.datazone#CreateProjectMembership": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateProjectMembershipInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateProjectMembershipOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Creates a project membership in Amazon DataZone.
", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/createMembership" + } + } + }, + "com.amazonaws.datazone#CreateProjectMembershipInput": { "type": "structure", "members": { - "id": { - "target": "com.amazonaws.datazone#SubscriptionRequestId", + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the subscription request.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which project membership is created.
", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "createdBy": { - "target": "com.amazonaws.datazone#CreatedBy", + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The Amazon DataZone user who created the subscription request.
", + "smithy.api#documentation": "The ID of the project for which this project membership was created.
", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "updatedBy": { - "target": "com.amazonaws.datazone#UpdatedBy", + "member": { + "target": "com.amazonaws.datazone#Member", "traits": { - "smithy.api#documentation": "The Amazon DataZone user who updated the subscription request.
" + "smithy.api#documentation": "The project member whose project membership was created.
", + "smithy.api#required": {} } }, + "designation": { + "target": "com.amazonaws.datazone#UserDesignation", + "traits": { + "smithy.api#documentation": "The designation of the project membership.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateProjectMembershipOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateProjectOutput": { + "type": "structure", + "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in whcih the subscription request is created.
", + "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which the project was created.
", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.datazone#SubscriptionRequestStatus", + "id": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The status of the subscription request.
", + "smithy.api#documentation": "The ID of the Amazon DataZone project.
", "smithy.api#required": {} } }, - "createdAt": { - "target": "com.amazonaws.datazone#CreatedAt", + "name": { + "target": "com.amazonaws.datazone#ProjectName", "traits": { - "smithy.api#documentation": "A timestamp of when the subscription request is created.
", + "smithy.api#documentation": "The name of the project.
", "smithy.api#required": {} } }, - "updatedAt": { - "target": "com.amazonaws.datazone#UpdatedAt", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "The timestamp of when the subscription request was updated.
", - "smithy.api#required": {} + "smithy.api#documentation": "The description of the project.
" } }, - "requestReason": { - "target": "com.amazonaws.datazone#RequestReason", + "projectStatus": { + "target": "com.amazonaws.datazone#ProjectStatus", "traits": { - "smithy.api#documentation": "The reason for the subscription request.
", - "smithy.api#required": {} + "smithy.api#documentation": "The status of the Amazon DataZone project that was created.
" } }, - "subscribedPrincipals": { - "target": "com.amazonaws.datazone#SubscribedPrincipals", + "failureReasons": { + "target": "com.amazonaws.datazone#FailureReasons", "traits": { - "smithy.api#documentation": "The subscribed principals of the subscription request.
", - "smithy.api#length": { - "min": 1, - "max": 1 - }, - "smithy.api#required": {} + "smithy.api#documentation": "Specifies the error message that is returned if the operation cannot be successfully\n completed.
" } }, - "subscribedListings": { - "target": "com.amazonaws.datazone#SubscribedListings", + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "The published asset for which the subscription grant is to be created.
", - "smithy.api#length": { - "min": 1, - "max": 1 - }, + "smithy.api#documentation": "The Amazon DataZone user who created the project.
", "smithy.api#required": {} } }, - "reviewerId": { - "target": "smithy.api#String", + "createdAt": { + "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "The ID of the reviewer of the subscription request.
" + "smithy.api#documentation": "The timestamp of when the project was created.
", + "smithy.api#timestampFormat": "date-time" } }, - "decisionComment": { - "target": "com.amazonaws.datazone#DecisionComment", + "lastUpdatedAt": { + "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "The decision comment of the subscription request.
" + "smithy.api#documentation": "The timestamp of when the project was last updated.
", + "smithy.api#timestampFormat": "date-time" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "The glossary terms that can be used in the project.
" } } }, @@ -4438,13 +4812,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateSubscriptionTarget": { + "com.amazonaws.datazone#CreateSubscriptionGrant": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateSubscriptionTargetInput" + "target": "com.amazonaws.datazone#CreateSubscriptionGrantInput" }, "output": { - "target": "com.amazonaws.datazone#CreateSubscriptionTargetOutput" + "target": "com.amazonaws.datazone#CreateSubscriptionGrantOutput" }, "errors": [ { @@ -4467,21 +4841,21 @@ } ], "traits": { - "smithy.api#documentation": "Creates a subscription target in Amazon DataZone.
", + "smithy.api#documentation": "Creates a subsscription grant in Amazon DataZone.
", "smithy.api#http": { "code": 200, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets" + "uri": "/v2/domains/{domainIdentifier}/subscription-grants" } } }, - "com.amazonaws.datazone#CreateSubscriptionTargetInput": { + "com.amazonaws.datazone#CreateSubscriptionGrantInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which subscription target is created.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the subscription grant is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4489,57 +4863,28 @@ "environmentIdentifier": { "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "The ID of the environment in which subscription target is created.
", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "The ID of the environment in which the subscription grant is created.
", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#SubscriptionTargetName", + "subscriptionTargetIdentifier": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", "traits": { - "smithy.api#documentation": "The name of the subscription target.
", + "smithy.api#documentation": "The ID of the subscription target for which the subscription grant is created.
", "smithy.api#required": {} } }, - "type": { - "target": "smithy.api#String", + "grantedEntity": { + "target": "com.amazonaws.datazone#GrantedEntityInput", "traits": { - "smithy.api#documentation": "The type of the subscription target.
", - "smithy.api#required": {} - } - }, - "subscriptionTargetConfig": { - "target": "com.amazonaws.datazone#SubscriptionTargetForms", - "traits": { - "smithy.api#documentation": "The configuration of the subscription target.
", - "smithy.api#required": {} - } - }, - "authorizedPrincipals": { - "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", - "traits": { - "smithy.api#documentation": "The authorized principals of the subscription target.
", - "smithy.api#required": {} - } - }, - "manageAccessRole": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The manage access role that is used to create the subscription target.
", - "smithy.api#required": {} - } - }, - "applicableAssetTypes": { - "target": "com.amazonaws.datazone#ApplicableAssetTypes", - "traits": { - "smithy.api#documentation": "The asset types that can be included in the subscription target.
", + "smithy.api#documentation": "The entity to which the subscription is to be granted.
", "smithy.api#required": {} } }, - "provider": { - "target": "smithy.api#String", + "assetTargetNames": { + "target": "com.amazonaws.datazone#AssetTargetNames", "traits": { - "smithy.api#documentation": "The provider of the subscription target.
" + "smithy.api#documentation": "The names of the assets for which the subscription grant is created.
" } }, "clientToken": { @@ -4554,110 +4899,84 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateSubscriptionTargetOutput": { + "com.amazonaws.datazone#CreateSubscriptionGrantOutput": { "type": "structure", "members": { "id": { - "target": "com.amazonaws.datazone#SubscriptionTargetId", - "traits": { - "smithy.api#documentation": "The ID of the subscription target.
", - "smithy.api#required": {} - } - }, - "authorizedPrincipals": { - "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", - "traits": { - "smithy.api#documentation": "The authorised principals of the subscription target.
", - "smithy.api#required": {} - } - }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", - "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the subscription target was created.
", - "smithy.api#required": {} - } - }, - "projectId": { - "target": "com.amazonaws.datazone#ProjectId", - "traits": { - "smithy.api#documentation": "???
", - "smithy.api#required": {} - } - }, - "environmentId": { - "target": "com.amazonaws.datazone#EnvironmentId", - "traits": { - "smithy.api#documentation": "The ID of the environment in which the subscription target was created.
", - "smithy.api#required": {} - } - }, - "name": { - "target": "com.amazonaws.datazone#SubscriptionTargetName", - "traits": { - "smithy.api#documentation": "The name of the subscription target.
", - "smithy.api#required": {} - } - }, - "type": { - "target": "smithy.api#String", + "target": "com.amazonaws.datazone#SubscriptionGrantId", "traits": { - "smithy.api#documentation": "The type of the subscription target.
", + "smithy.api#documentation": "The ID of the subscription grant.
", "smithy.api#required": {} } }, "createdBy": { "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "The Amazon DataZone user who created the subscription target.
", + "smithy.api#documentation": "The Amazon DataZone user who created the subscription grant.
", "smithy.api#required": {} } }, "updatedBy": { "target": "com.amazonaws.datazone#UpdatedBy", "traits": { - "smithy.api#documentation": "The Amazon DataZone user who updated the subscription target.
" + "smithy.api#documentation": "The Amazon DataZone user who updated the subscription grant.
" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the subscription grant is created.
", + "smithy.api#required": {} } }, "createdAt": { "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "The timestamp of when the subscription target was created.
", + "smithy.api#documentation": "A timestamp of when the subscription grant is created.
", "smithy.api#required": {} } }, "updatedAt": { "target": "com.amazonaws.datazone#UpdatedAt", "traits": { - "smithy.api#documentation": "The timestamp of when the subscription target was updated.
" + "smithy.api#documentation": "A timestamp of when the subscription grant was updated.
", + "smithy.api#required": {} } }, - "manageAccessRole": { - "target": "smithy.api#String", + "subscriptionTargetId": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", "traits": { - "smithy.api#documentation": "The manage access role with which the subscription target was created.
", + "smithy.api#documentation": "The ID of the subscription target for which the subscription grant is created.
", "smithy.api#required": {} } }, - "applicableAssetTypes": { - "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "grantedEntity": { + "target": "com.amazonaws.datazone#GrantedEntity", "traits": { - "smithy.api#documentation": "The asset types that can be included in the subscription target.
", + "smithy.api#documentation": "The entity to which the subscription is granted.
", "smithy.api#required": {} } }, - "subscriptionTargetConfig": { - "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "status": { + "target": "com.amazonaws.datazone#SubscriptionGrantOverallStatus", "traits": { - "smithy.api#documentation": "The configuration of the subscription target.
", + "smithy.api#documentation": "The status of the subscription grant.
", "smithy.api#required": {} } }, - "provider": { - "target": "smithy.api#String", + "assets": { + "target": "com.amazonaws.datazone#SubscribedAssets", "traits": { - "smithy.api#documentation": "The provider of the subscription target.
", - "smithy.api#required": {} + "smithy.api#documentation": "The assets for which the subscription grant is created.
" + } + }, + "subscriptionId": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#deprecated": { + "message": "Multiple subscriptions can exist for a single grant" + }, + "smithy.api#documentation": "The identifier of the subscription grant.
" } } }, @@ -4665,63 +4984,73 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#CreateUserProfile": { + "com.amazonaws.datazone#CreateSubscriptionRequest": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#CreateUserProfileInput" + "target": "com.amazonaws.datazone#CreateSubscriptionRequestInput" }, "output": { - "target": "com.amazonaws.datazone#CreateUserProfileOutput" + "target": "com.amazonaws.datazone#CreateSubscriptionRequestOutput" }, "errors": [ { "target": "com.amazonaws.datazone#AccessDeniedException" }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, { "target": "com.amazonaws.datazone#InternalServerException" }, { "target": "com.amazonaws.datazone#ResourceNotFoundException" }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, { "target": "com.amazonaws.datazone#ValidationException" } ], "traits": { - "smithy.api#documentation": "Creates a user profile in Amazon DataZone.
", + "smithy.api#documentation": "Creates a subscription request in Amazon DataZone.
", "smithy.api#http": { - "code": 201, + "code": 200, "method": "POST", - "uri": "/v2/domains/{domainIdentifier}/user-profiles" - }, - "smithy.api#idempotent": {}, - "smithy.api#tags": [ - "Administration" - ] + "uri": "/v2/domains/{domainIdentifier}/subscription-requests" + } } }, - "com.amazonaws.datazone#CreateUserProfileInput": { + "com.amazonaws.datazone#CreateSubscriptionRequestInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which a user profile is created.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the subscription request is created.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "userIdentifier": { - "target": "com.amazonaws.datazone#UserIdentifier", + "subscribedPrincipals": { + "target": "com.amazonaws.datazone#SubscribedPrincipalInputs", "traits": { - "smithy.api#documentation": "The identifier of the user for which the user profile is created.
", + "smithy.api#documentation": "The Amazon DataZone principals for whom the subscription request is created.
", "smithy.api#required": {} } }, - "userType": { - "target": "com.amazonaws.datazone#UserType", + "subscribedListings": { + "target": "com.amazonaws.datazone#SubscribedListingInputs", "traits": { - "smithy.api#documentation": "The user type of the user for which the user profile is created.
" + "smithy.api#documentation": "The published asset for which the subscription grant is to be created.
", + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "The reason for the subscription request.
", + "smithy.api#required": {} } }, "clientToken": { @@ -4736,221 +5065,859 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#CreateUserProfileOutput": { + "com.amazonaws.datazone#CreateSubscriptionRequestOutput": { "type": "structure", "members": { - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "id": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", "traits": { - "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which a user profile is created.
" + "smithy.api#documentation": "The ID of the subscription request.
", + "smithy.api#required": {} } }, - "id": { - "target": "com.amazonaws.datazone#UserProfileId", + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "The identifier of the user profile.
" + "smithy.api#documentation": "The Amazon DataZone user who created the subscription request.
", + "smithy.api#required": {} } }, - "type": { - "target": "com.amazonaws.datazone#UserProfileType", + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", "traits": { - "smithy.api#documentation": "The type of the user profile.
" + "smithy.api#documentation": "The Amazon DataZone user who updated the subscription request.
" } }, - "status": { - "target": "com.amazonaws.datazone#UserProfileStatus", + "domainId": { + "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The status of the user profile.
" + "smithy.api#documentation": "The ID of the Amazon DataZone domain in whcih the subscription request is created.
", + "smithy.api#required": {} } }, - "details": { - "target": "com.amazonaws.datazone#UserProfileDetails" - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.datazone#CreatedAt": { - "type": "timestamp" - }, - "com.amazonaws.datazone#CreatedBy": { - "type": "string" - }, - "com.amazonaws.datazone#CronString": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 256 - }, - "smithy.api#pattern": "cron\\((\\b[0-5]?[0-9]\\b) (\\b2[0-3]\\b|\\b[0-1]?[0-9]\\b) (.*){1,5} (.*){1,5} (.*){1,5} (.*){1,5}\\)" - } - }, - "com.amazonaws.datazone#CustomParameter": { - "type": "structure", - "members": { - "keyName": { - "target": "smithy.api#String", + "status": { + "target": "com.amazonaws.datazone#SubscriptionRequestStatus", "traits": { - "smithy.api#documentation": "The key name of the parameter.
", - "smithy.api#pattern": "^[a-zA-Z_][a-zA-Z0-9_]*$", + "smithy.api#documentation": "The status of the subscription request.
", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#Description", + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "The description of the parameter.
" + "smithy.api#documentation": "A timestamp of when the subscription request is created.
", + "smithy.api#required": {} } }, - "fieldType": { - "target": "smithy.api#String", + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", "traits": { - "smithy.api#documentation": "The filed type of the parameter.
", + "smithy.api#documentation": "The timestamp of when the subscription request was updated.
", + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "The reason for the subscription request.
", + "smithy.api#required": {} + } + }, + "subscribedPrincipals": { + "target": "com.amazonaws.datazone#SubscribedPrincipals", + "traits": { + "smithy.api#documentation": "The subscribed principals of the subscription request.
", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "subscribedListings": { + "target": "com.amazonaws.datazone#SubscribedListings", + "traits": { + "smithy.api#documentation": "The published asset for which the subscription grant is to be created.
", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "reviewerId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ID of the reviewer of the subscription request.
" + } + }, + "decisionComment": { + "target": "com.amazonaws.datazone#DecisionComment", + "traits": { + "smithy.api#documentation": "The decision comment of the subscription request.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateSubscriptionTarget": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateSubscriptionTargetInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateSubscriptionTargetOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Creates a subscription target in Amazon DataZone.
", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets" + } + } + }, + "com.amazonaws.datazone#CreateSubscriptionTargetInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which subscription target is created.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "The ID of the environment in which subscription target is created.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#SubscriptionTargetName", + "traits": { + "smithy.api#documentation": "The name of the subscription target.
", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The type of the subscription target.
", + "smithy.api#required": {} + } + }, + "subscriptionTargetConfig": { + "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "traits": { + "smithy.api#documentation": "The configuration of the subscription target.
", + "smithy.api#required": {} + } + }, + "authorizedPrincipals": { + "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", + "traits": { + "smithy.api#documentation": "The authorized principals of the subscription target.
", + "smithy.api#required": {} + } + }, + "manageAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The manage access role that is used to create the subscription target.
", + "smithy.api#required": {} + } + }, + "applicableAssetTypes": { + "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "traits": { + "smithy.api#documentation": "The asset types that can be included in the subscription target.
", + "smithy.api#required": {} + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The provider of the subscription target.
" + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateSubscriptionTargetOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "The ID of the subscription target.
", + "smithy.api#required": {} + } + }, + "authorizedPrincipals": { + "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", + "traits": { + "smithy.api#documentation": "The authorised principals of the subscription target.
", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the subscription target was created.
", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "???
", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "The ID of the environment in which the subscription target was created.
", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#SubscriptionTargetName", + "traits": { + "smithy.api#documentation": "The name of the subscription target.
", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The type of the subscription target.
", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "The Amazon DataZone user who created the subscription target.
", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "The Amazon DataZone user who updated the subscription target.
" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp of when the subscription target was created.
", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "The timestamp of when the subscription target was updated.
" + } + }, + "manageAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The manage access role with which the subscription target was created.
", + "smithy.api#required": {} + } + }, + "applicableAssetTypes": { + "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "traits": { + "smithy.api#documentation": "The asset types that can be included in the subscription target.
", + "smithy.api#required": {} + } + }, + "subscriptionTargetConfig": { + "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "traits": { + "smithy.api#documentation": "The configuration of the subscription target.
", + "smithy.api#required": {} + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The provider of the subscription target.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateUserProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateUserProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateUserProfileOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Creates a user profile in Amazon DataZone.
", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/user-profiles" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#CreateUserProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which a user profile is created.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "userIdentifier": { + "target": "com.amazonaws.datazone#UserIdentifier", + "traits": { + "smithy.api#documentation": "The identifier of the user for which the user profile is created.
", + "smithy.api#required": {} + } + }, + "userType": { + "target": "com.amazonaws.datazone#UserType", + "traits": { + "smithy.api#documentation": "The user type of the user for which the user profile is created.
" + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateUserProfileOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The identifier of the Amazon DataZone domain in which a user profile is created.
" + } + }, + "id": { + "target": "com.amazonaws.datazone#UserProfileId", + "traits": { + "smithy.api#documentation": "The identifier of the user profile.
" + } + }, + "type": { + "target": "com.amazonaws.datazone#UserProfileType", + "traits": { + "smithy.api#documentation": "The type of the user profile.
" + } + }, + "status": { + "target": "com.amazonaws.datazone#UserProfileStatus", + "traits": { + "smithy.api#documentation": "The status of the user profile.
" + } + }, + "details": { + "target": "com.amazonaws.datazone#UserProfileDetails" + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreatedAt": { + "type": "timestamp" + }, + "com.amazonaws.datazone#CreatedBy": { + "type": "string" + }, + "com.amazonaws.datazone#CronString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "cron\\((\\b[0-5]?[0-9]\\b) (\\b2[0-3]\\b|\\b[0-1]?[0-9]\\b) (.*){1,5} (.*){1,5} (.*){1,5} (.*){1,5}\\)" + } + }, + "com.amazonaws.datazone#CustomParameter": { + "type": "structure", + "members": { + "keyName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The key name of the parameter.
", + "smithy.api#pattern": "^[a-zA-Z_][a-zA-Z0-9_]*$", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "The description of the parameter.
" + } + }, + "fieldType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The filed type of the parameter.
", "smithy.api#required": {} } }, "defaultValue": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The default value of the parameter.
" + "smithy.api#documentation": "The default value of the parameter.
" + } + }, + "isEditable": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Specifies whether the parameter is editable.
" + } + }, + "isOptional": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Specifies whether the custom parameter is optional.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The details of user parameters of an environment blueprint.
" + } + }, + "com.amazonaws.datazone#CustomParameterList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#CustomParameter" + } + }, + "com.amazonaws.datazone#DataAssetActivityStatus": { + "type": "enum", + "members": { + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "PUBLISHING_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PUBLISHING_FAILED" + } + }, + "SUCCEEDED_CREATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED_CREATED" + } + }, + "SUCCEEDED_UPDATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED_UPDATED" + } + }, + "SKIPPED_ALREADY_IMPORTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIPPED_ALREADY_IMPORTED" + } + }, + "SKIPPED_ARCHIVED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIPPED_ARCHIVED" + } + }, + "SKIPPED_NO_ACCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIPPED_NO_ACCESS" + } + }, + "UNCHANGED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNCHANGED" + } + } + } + }, + "com.amazonaws.datazone#DataPointIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{0,36}$" + } + }, + "com.amazonaws.datazone#DataProduct": { + "type": "resource", + "identifiers": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId" + }, + "identifier": { + "target": "com.amazonaws.datazone#DataProductId" + } + }, + "properties": { + "id": { + "target": "com.amazonaws.datazone#DataProductId" + }, + "revision": { + "target": "com.amazonaws.datazone#Revision" + }, + "name": { + "target": "com.amazonaws.datazone#DataProductName" + }, + "description": { + "target": "com.amazonaws.datazone#DataProductDescription" + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "glossaryTerms": { + 
"target": "com.amazonaws.datazone#GlossaryTerms" + }, + "items": { + "target": "com.amazonaws.datazone#DataProductItems" + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "formsInput": { + "target": "com.amazonaws.datazone#FormInputList" + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList" + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt" + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy" + }, + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt" + }, + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy" + }, + "status": { + "target": "com.amazonaws.datazone#DataProductStatus" + } + }, + "create": { + "target": "com.amazonaws.datazone#CreateDataProduct" + }, + "read": { + "target": "com.amazonaws.datazone#GetDataProduct" + }, + "delete": { + "target": "com.amazonaws.datazone#DeleteDataProduct" + }, + "operations": [ + { + "target": "com.amazonaws.datazone#CreateDataProductRevision" + } + ] + }, + "com.amazonaws.datazone#DataProductDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#DataProductId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#DataProductItem": { + "type": "structure", + "members": { + "itemType": { + "target": "com.amazonaws.datazone#DataProductItemType", + "traits": { + "smithy.api#documentation": "The type of the data product.
", + "smithy.api#required": {} } }, - "isEditable": { - "target": "smithy.api#Boolean", + "identifier": { + "target": "com.amazonaws.datazone#EntityIdentifier", "traits": { - "smithy.api#documentation": "Specifies whether the parameter is editable.
" + "smithy.api#documentation": "The ID of the data product.
", + "smithy.api#required": {} } }, - "isOptional": { - "target": "smithy.api#Boolean", + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "Specifies whether the custom parameter is optional.
" + "smithy.api#documentation": "The revision of the data product.
" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#ItemGlossaryTerms", + "traits": { + "smithy.api#documentation": "The glossary terms of the data product.
" } } }, "traits": { - "smithy.api#documentation": "The details of user parameters of an environment blueprint.
" - } - }, - "com.amazonaws.datazone#CustomParameterList": { - "type": "list", - "member": { - "target": "com.amazonaws.datazone#CustomParameter" + "smithy.api#documentation": "The data product.
" } }, - "com.amazonaws.datazone#DataAssetActivityStatus": { + "com.amazonaws.datazone#DataProductItemType": { "type": "enum", "members": { - "FAILED": { + "ASSET": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "FAILED" + "smithy.api#enumValue": "ASSET" } - }, - "PUBLISHING_FAILED": { - "target": "smithy.api#Unit", + } + } + }, + "com.amazonaws.datazone#DataProductItems": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#DataProductItem" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.datazone#DataProductListing": { + "type": "structure", + "members": { + "dataProductId": { + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#enumValue": "PUBLISHING_FAILED" + "smithy.api#documentation": "The ID of the data product listing.
" } }, - "SUCCEEDED_CREATED": { - "target": "smithy.api#Unit", + "dataProductRevision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#enumValue": "SUCCEEDED_CREATED" + "smithy.api#documentation": "The revision of the data product listing.
" } }, - "SUCCEEDED_UPDATED": { - "target": "smithy.api#Unit", + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#enumValue": "SUCCEEDED_UPDATED" + "smithy.api#documentation": "The timestamp at which the data product listing was created.
" } }, - "SKIPPED_ALREADY_IMPORTED": { - "target": "smithy.api#Unit", + "forms": { + "target": "com.amazonaws.datazone#Forms", "traits": { - "smithy.api#enumValue": "SKIPPED_ALREADY_IMPORTED" + "smithy.api#documentation": "The metadata forms of the data product listing.
" } }, - "SKIPPED_ARCHIVED": { - "target": "smithy.api#Unit", + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", "traits": { - "smithy.api#enumValue": "SKIPPED_ARCHIVED" + "smithy.api#documentation": "The glossary terms of the data product listing.
" } }, - "SKIPPED_NO_ACCESS": { - "target": "smithy.api#Unit", + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#enumValue": "SKIPPED_NO_ACCESS" + "smithy.api#documentation": "The ID of the owning project of the data product listing.
" } }, - "UNCHANGED": { - "target": "smithy.api#Unit", + "items": { + "target": "com.amazonaws.datazone#ListingSummaries", "traits": { - "smithy.api#enumValue": "UNCHANGED" + "smithy.api#documentation": "The data assets of the data product listing.
" } } - } - }, - "com.amazonaws.datazone#DataPointIdentifier": { - "type": "string", - "traits": { - "smithy.api#pattern": "^[a-zA-Z0-9_-]{0,36}$" - } - }, - "com.amazonaws.datazone#DataProductDescription": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 4096 - }, - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.datazone#DataProductId": { - "type": "string", + }, "traits": { - "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + "smithy.api#documentation": "The data product listing.
" } }, - "com.amazonaws.datazone#DataProductItem": { + "com.amazonaws.datazone#DataProductListingItem": { "type": "structure", "members": { - "itemId": { + "listingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "The ID of the listing.
" + } + }, + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "The revision of the listing.
" + } + }, + "name": { + "target": "com.amazonaws.datazone#DataProductName", + "traits": { + "smithy.api#documentation": "The name of the asset of the data product.
" + } + }, + "entityId": { "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "" + "smithy.api#documentation": "The entity ID of the asset of the asset of the data product.
" } }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "entityRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "The revision of the asset of the asset of the data product.
" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "The description of the asset of the asset of the data product.
" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp at which the asset of the data product listing was created.
" + } + }, + "listingCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "The timestamp at which the listing was created.
" + } + }, + "listingUpdatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "The user who updated the listing.
" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", + "traits": { + "smithy.api#documentation": "The glossary terms of the asset of the asset of the data product.
" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "The ID of the owning project of the asset of the data product.
" + } + }, + "additionalAttributes": { + "target": "com.amazonaws.datazone#DataProductListingItemAdditionalAttributes", + "traits": { + "smithy.api#documentation": "The additional attributes of the asset of the data product.
" + } + }, + "items": { + "target": "com.amazonaws.datazone#ListingSummaryItems", "traits": { - "smithy.api#documentation": "" + "smithy.api#documentation": "The data of the asset of the data product.
" } } }, "traits": { - "smithy.api#deprecated": { - "message": "This structure is deprecated." - }, - "smithy.api#documentation": "" + "smithy.api#documentation": "The asset of the data product listing.
" } }, - "com.amazonaws.datazone#DataProductItems": { - "type": "list", - "member": { - "target": "com.amazonaws.datazone#DataProductItem" + "com.amazonaws.datazone#DataProductListingItemAdditionalAttributes": { + "type": "structure", + "members": { + "forms": { + "target": "com.amazonaws.datazone#Forms", + "traits": { + "smithy.api#documentation": "The metadata forms of the asset of the data product.
" + } + } }, "traits": { - "smithy.api#deprecated": { - "message": "This structure is deprecated." - }, - "smithy.api#length": { - "min": 0, - "max": 100 - } + "smithy.api#documentation": "The additional attributes of the asset of the data product.
" } }, "com.amazonaws.datazone#DataProductName": { @@ -4963,85 +5930,143 @@ "smithy.api#sensitive": {} } }, - "com.amazonaws.datazone#DataProductSummary": { + "com.amazonaws.datazone#DataProductResultItem": { "type": "structure", "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "", + "smithy.api#documentation": "The ID of the domain where the data product lives.
", "smithy.api#required": {} } }, "id": { "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "", + "smithy.api#documentation": "The ID of the data product.
", "smithy.api#required": {} } }, "name": { "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "", + "smithy.api#documentation": "The name of the data product.
", "smithy.api#required": {} } }, "owningProjectId": { "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "", + "smithy.api#documentation": "The ID of the owning project of the data product.
", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "" + "smithy.api#documentation": "The description of the data product.
" } }, "glossaryTerms": { "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "" + "smithy.api#documentation": "The glossary terms of the data product.
" } }, - "dataProductItems": { - "target": "com.amazonaws.datazone#DataProductItems", + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp at which the data product was created.
" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "The user who created the data product.
" + } + }, + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp at which first revision of the data product was created.
" + } + }, + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "The user who created the first revision of the data product.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The data product.
" + } + }, + "com.amazonaws.datazone#DataProductRevision": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the domain where the data product revision lives.
" + } + }, + "id": { + "target": "com.amazonaws.datazone#DataProductId", + "traits": { + "smithy.api#documentation": "The ID of the data product revision.
" + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "" + "smithy.api#documentation": "The data product revision.
" } }, "createdAt": { "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "" + "smithy.api#documentation": "The timestamp at which the data product revision was created.
" } }, "createdBy": { "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "" + "smithy.api#documentation": "The user who created the data product revision.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The data product revision.
" + } + }, + "com.amazonaws.datazone#DataProductRevisions": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#DataProductRevision" + } + }, + "com.amazonaws.datazone#DataProductStatus": { + "type": "enum", + "members": { + "CREATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATED" } }, - "updatedAt": { - "target": "com.amazonaws.datazone#UpdatedAt", + "CREATING": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "" + "smithy.api#enumValue": "CREATING" } }, - "updatedBy": { - "target": "com.amazonaws.datazone#UpdatedBy", + "CREATE_FAILED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "" + "smithy.api#enumValue": "CREATE_FAILED" } } - }, - "traits": { - "smithy.api#deprecated": { - "message": "This structure is deprecated." - }, - "smithy.api#documentation": "" } }, "com.amazonaws.datazone#DataSource": { @@ -5715,6 +6740,9 @@ { "target": "com.amazonaws.datazone#CancelSubscription" }, + { + "target": "com.amazonaws.datazone#CreateAssetFilter" + }, { "target": "com.amazonaws.datazone#CreateEnvironment" }, @@ -5748,6 +6776,9 @@ { "target": "com.amazonaws.datazone#CreateUserProfile" }, + { + "target": "com.amazonaws.datazone#DeleteAssetFilter" + }, { "target": "com.amazonaws.datazone#DeleteEnvironment" }, @@ -5778,6 +6809,9 @@ { "target": "com.amazonaws.datazone#DisassociateEnvironmentRole" }, + { + "target": "com.amazonaws.datazone#GetAssetFilter" + }, { "target": "com.amazonaws.datazone#GetEnvironment" }, @@ -5787,6 +6821,9 @@ { "target": "com.amazonaws.datazone#GetEnvironmentBlueprint" }, + { + "target": "com.amazonaws.datazone#GetEnvironmentCredentials" + }, { "target": "com.amazonaws.datazone#GetEnvironmentProfile" }, @@ -5820,9 +6857,15 @@ { "target": "com.amazonaws.datazone#GetUserProfile" }, + { + "target": "com.amazonaws.datazone#ListAssetFilters" + }, { "target": "com.amazonaws.datazone#ListAssetRevisions" }, + { + "target": 
"com.amazonaws.datazone#ListDataProductRevisions" + }, { "target": "com.amazonaws.datazone#ListDataSourceRunActivities" }, @@ -5904,6 +6947,9 @@ { "target": "com.amazonaws.datazone#UntagResource" }, + { + "target": "com.amazonaws.datazone#UpdateAssetFilter" + }, { "target": "com.amazonaws.datazone#UpdateEnvironment" }, @@ -5939,6 +6985,9 @@ { "target": "com.amazonaws.datazone#AssetType" }, + { + "target": "com.amazonaws.datazone#DataProduct" + }, { "target": "com.amazonaws.datazone#DataSource" }, @@ -6419,6 +7468,9 @@ { "target": "com.amazonaws.datazone#AccessDeniedException" }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, { "target": "com.amazonaws.datazone#InternalServerException" }, @@ -6433,7 +7485,7 @@ } ], "traits": { - "smithy.api#documentation": "Delets an asset in Amazon DataZone.
", + "smithy.api#documentation": "Deletes an asset in Amazon DataZone.
", "smithy.api#http": { "code": 204, "method": "DELETE", @@ -6442,6 +7494,76 @@ "smithy.api#idempotent": {} } }, + "com.amazonaws.datazone#DeleteAssetFilter": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteAssetFilterInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Deletes an asset filter.
", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteAssetFilterInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the domain where you want to delete an asset filter.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "assetIdentifier": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the data asset.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "The ID of the asset filter that you want to delete.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.datazone#DeleteAssetInput": { "type": "structure", "members": { @@ -6541,6 +7663,75 @@ "smithy.api#output": {} } }, + "com.amazonaws.datazone#DeleteDataProduct": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteDataProductInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteDataProductOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Deletes an data product in Amazon DataZone.
", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/data-products/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteDataProductInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the data product is deleted.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#DataProductId", + "traits": { + "smithy.api#documentation": "The identifier of the data product that is deleted.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteDataProductOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#DeleteDataSource": { "type": "operation", "input": { @@ -7661,6 +8852,9 @@ "subscriptionId": { "target": "com.amazonaws.datazone#SubscriptionId", "traits": { + "smithy.api#deprecated": { + "message": "Multiple subscriptions can exist for a single grant" + }, "smithy.api#documentation": "The identifier of the subsctiption whose subscription grant is to be deleted.
" } } @@ -8402,6 +9596,12 @@ "traits": { "smithy.api#enumValue": "ASSET" } + }, + "DATA_PRODUCT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATA_PRODUCT" + } } } }, @@ -8569,6 +9769,12 @@ "smithy.api#documentation": "The timestamp of when the environment blueprint was updated.
", "smithy.api#timestampFormat": "date-time" } + }, + "provisioningConfigurations": { + "target": "com.amazonaws.datazone#ProvisioningConfigurationList", + "traits": { + "smithy.api#documentation": "The provisioning configuration of a blueprint.
" + } } }, "traits": { @@ -9011,6 +10217,28 @@ "smithy.api#documentation": "The details of an environment.
" } }, + "com.amazonaws.datazone#EqualToExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the column.
", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The value that might be equal to an expression.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies whether the value is equal to an expression.
" + } + }, "com.amazonaws.datazone#ErrorMessage": { "type": "string" }, @@ -9145,6 +10373,12 @@ "target": "com.amazonaws.datazone#FilterExpression" } }, + "com.amazonaws.datazone#FilterId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, "com.amazonaws.datazone#FilterList": { "type": "list", "member": { @@ -9157,6 +10391,34 @@ } } }, + "com.amazonaws.datazone#FilterName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[\\w -]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#FilterStatus": { + "type": "enum", + "members": { + "VALID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VALID" + } + }, + "INVALID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID" + } + } + } + }, "com.amazonaws.datazone#FirstName": { "type": "string", "traits": { @@ -9515,30 +10777,65 @@ "min": 0, "max": 10 } - } - }, - "com.amazonaws.datazone#FormsOutputMap": { - "type": "map", - "key": { - "target": "com.amazonaws.datazone#FormName" - }, - "value": { - "target": "com.amazonaws.datazone#FormEntryOutput" - }, + } + }, + "com.amazonaws.datazone#FormsOutputMap": { + "type": "map", + "key": { + "target": "com.amazonaws.datazone#FormName" + }, + "value": { + "target": "com.amazonaws.datazone#FormEntryOutput" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.datazone#GetAsset": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetAssetInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetAssetOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": 
"com.amazonaws.datazone#ValidationException" + } + ], "traits": { - "smithy.api#length": { - "min": 0, - "max": 10 - } + "smithy.api#documentation": "Gets an Amazon DataZone asset.
", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/assets/{identifier}" + }, + "smithy.api#readonly": {} } }, - "com.amazonaws.datazone#GetAsset": { + "com.amazonaws.datazone#GetAssetFilter": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#GetAssetInput" + "target": "com.amazonaws.datazone#GetAssetFilterInput" }, "output": { - "target": "com.amazonaws.datazone#GetAssetOutput" + "target": "com.amazonaws.datazone#GetAssetFilterOutput" }, "errors": [ { @@ -9558,15 +10855,126 @@ } ], "traits": { - "smithy.api#documentation": "Gets an Amazon DataZone asset.
", + "smithy.api#documentation": "Gets an asset filter.
", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/v2/domains/{domainIdentifier}/assets/{identifier}" + "uri": "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}" }, "smithy.api#readonly": {} } }, + "com.amazonaws.datazone#GetAssetFilterInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the domain where you want to get an asset filter.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "assetIdentifier": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the data asset.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "The ID of the asset filter.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetAssetFilterOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "The ID of the asset filter.
", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the domain where you want to get an asset filter.
", + "smithy.api#required": {} + } + }, + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the data asset.
", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FilterName", + "traits": { + "smithy.api#documentation": "The name of the asset filter.
", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "The description of the asset filter.
" + } + }, + "status": { + "target": "com.amazonaws.datazone#FilterStatus", + "traits": { + "smithy.api#documentation": "The status of the asset filter.
" + } + }, + "configuration": { + "target": "com.amazonaws.datazone#AssetFilterConfiguration", + "traits": { + "smithy.api#documentation": "The configuration of the asset filter.
", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp at which the asset filter was created.
" + } + }, + "errorMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The error message that is displayed if the action does not complete successfully.
" + } + }, + "effectiveColumnNames": { + "target": "com.amazonaws.datazone#ColumnNameList", + "traits": { + "smithy.api#documentation": "The column names of the asset filter.
" + } + }, + "effectiveRowFilter": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The row filter of the asset filter.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#GetAssetInput": { "type": "structure", "members": { @@ -9657,64 +11065,215 @@ "createdBy": { "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "The Amazon DataZone user who created the asset.
" + "smithy.api#documentation": "The Amazon DataZone user who created the asset.
" + } + }, + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp of when the first revision of the asset was created.
" + } + }, + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "The Amazon DataZone user who created the first revision of the asset.
" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "The business glossary terms attached to the asset.
" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "The ID of the project that owns the asset.
", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon DataZone domain to which the asset belongs.
", + "smithy.api#required": {} + } + }, + "listing": { + "target": "com.amazonaws.datazone#AssetListingDetails", + "traits": { + "smithy.api#documentation": "The listing of the asset.
" + } + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "The metadata forms attached to the asset.
", + "smithy.api#required": {} + } + }, + "readOnlyFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "The read-only metadata forms attached to the asset.
" + } + }, + "latestTimeSeriesDataPointFormsOutput": { + "target": "com.amazonaws.datazone#TimeSeriesDataPointSummaryFormOutputList", + "traits": { + "smithy.api#documentation": "The latest data point that was imported into the time series form for the asset.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetAssetType": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetAssetTypeInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetAssetTypeOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Gets an Amazon DataZone asset type.
", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/asset-types/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetAssetTypeInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the asset type exists.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetTypeIdentifier", + "traits": { + "smithy.api#documentation": "The ID of the asset type.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "The revision of the asset type.
", + "smithy.api#httpQuery": "revision" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetAssetTypeOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the asset type exists.
", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#TypeName", + "traits": { + "smithy.api#documentation": "The name of the asset type.
", + "smithy.api#required": {} } }, - "firstRevisionCreatedAt": { - "target": "com.amazonaws.datazone#CreatedAt", + "revision": { + "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "The timestamp of when the first revision of the asset was created.
" + "smithy.api#documentation": "The revision of the asset type.
", + "smithy.api#required": {} } }, - "firstRevisionCreatedBy": { - "target": "com.amazonaws.datazone#CreatedBy", + "description": { + "target": "com.amazonaws.datazone#Description", "traits": { - "smithy.api#documentation": "The Amazon DataZone user who created the first revision of the asset.
" + "smithy.api#documentation": "The description of the asset type.
" } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "formsOutput": { + "target": "com.amazonaws.datazone#FormsOutputMap", "traits": { - "smithy.api#documentation": "The business glossary terms attached to the asset.
" + "smithy.api#documentation": "The metadata forms attached to the asset type.
", + "smithy.api#required": {} } }, "owningProjectId": { "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The ID of the project that owns the asset.
", + "smithy.api#documentation": "The ID of the Amazon DataZone project that owns the asset type.
", "smithy.api#required": {} } }, - "domainId": { + "originDomainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain to which the asset belongs.
", - "smithy.api#required": {} + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the asset type was originally created.
" } }, - "listing": { - "target": "com.amazonaws.datazone#AssetListingDetails", + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The listing of the asset.
" + "smithy.api#documentation": "The ID of the Amazon DataZone project in which the asset type was originally created.
" } }, - "formsOutput": { - "target": "com.amazonaws.datazone#FormOutputList", + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "The metadata forms attached to the asset.
", - "smithy.api#required": {} + "smithy.api#documentation": "The timestamp of when the asset type was created.
" } }, - "readOnlyFormsOutput": { - "target": "com.amazonaws.datazone#FormOutputList", + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "The read-only metadata forms attached to the asset.
" + "smithy.api#documentation": "The Amazon DataZone user who created the asset type.
" } }, - "latestTimeSeriesDataPointFormsOutput": { - "target": "com.amazonaws.datazone#TimeSeriesDataPointSummaryFormOutputList", + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", "traits": { - "smithy.api#documentation": "The latest data point that was imported into the time series form for the asset.
" + "smithy.api#documentation": "The timestamp of when the asset type was updated.
" + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "The Amazon DataZone user that updated the asset type.
" } } }, @@ -9722,13 +11281,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.datazone#GetAssetType": { + "com.amazonaws.datazone#GetDataProduct": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#GetAssetTypeInput" + "target": "com.amazonaws.datazone#GetDataProductInput" }, "output": { - "target": "com.amazonaws.datazone#GetAssetTypeOutput" + "target": "com.amazonaws.datazone#GetDataProductOutput" }, "errors": [ { @@ -9748,30 +11307,30 @@ } ], "traits": { - "smithy.api#documentation": "Gets an Amazon DataZone asset type.
", + "smithy.api#documentation": "Gets the data product.
", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/v2/domains/{domainIdentifier}/asset-types/{identifier}" + "uri": "/v2/domains/{domainIdentifier}/data-products/{identifier}" }, "smithy.api#readonly": {} } }, - "com.amazonaws.datazone#GetAssetTypeInput": { + "com.amazonaws.datazone#GetDataProductInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the asset type exists.
", + "smithy.api#documentation": "The ID of the domain where the data product lives.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, "identifier": { - "target": "com.amazonaws.datazone#AssetTypeIdentifier", + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "The ID of the asset type.
", + "smithy.api#documentation": "The ID of the data product.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -9779,7 +11338,7 @@ "revision": { "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "The revision of the asset type.
", + "smithy.api#documentation": "The revision of the data product.
", "smithy.api#httpQuery": "revision" } } @@ -9788,84 +11347,98 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#GetAssetTypeOutput": { + "com.amazonaws.datazone#GetDataProductOutput": { "type": "structure", "members": { "domainId": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the asset type exists.
", + "smithy.api#documentation": "The ID of the domain where the data product lives.
", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.datazone#TypeName", + "id": { + "target": "com.amazonaws.datazone#DataProductId", "traits": { - "smithy.api#documentation": "The name of the asset type.
", + "smithy.api#documentation": "The ID of the data product.
", "smithy.api#required": {} } }, "revision": { "target": "com.amazonaws.datazone#Revision", "traits": { - "smithy.api#documentation": "The revision of the asset type.
", + "smithy.api#documentation": "The revision of the data product.
", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.datazone#Description", + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", "traits": { - "smithy.api#documentation": "The description of the asset type.
" + "smithy.api#documentation": "The ID of the owning project of the data product.
", + "smithy.api#required": {} } }, - "formsOutput": { - "target": "com.amazonaws.datazone#FormsOutputMap", + "name": { + "target": "com.amazonaws.datazone#DataProductName", "traits": { - "smithy.api#documentation": "The metadata forms attached to the asset type.
", + "smithy.api#documentation": "The name of the data product.
", "smithy.api#required": {} } }, - "owningProjectId": { - "target": "com.amazonaws.datazone#ProjectId", + "status": { + "target": "com.amazonaws.datazone#DataProductStatus", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone project that owns the asset type.
", + "smithy.api#default": "CREATED", + "smithy.api#documentation": "The status of the data product.
", "smithy.api#required": {} } }, - "originDomainId": { - "target": "com.amazonaws.datazone#DomainId", + "description": { + "target": "com.amazonaws.datazone#DataProductDescription", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone domain in which the asset type was originally created.
" + "smithy.api#documentation": "The description of the data product.
" } }, - "originProjectId": { - "target": "com.amazonaws.datazone#ProjectId", + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", "traits": { - "smithy.api#documentation": "The ID of the Amazon DataZone project in which the asset type was originally created.
" + "smithy.api#documentation": "The glossary terms of the data product.
" + } + }, + "items": { + "target": "com.amazonaws.datazone#DataProductItems", + "traits": { + "smithy.api#documentation": "The data assets of the data product.
" + } + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "The metadata forms of the data product.
" } }, "createdAt": { "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "The timestamp of when the asset type was created.
" + "smithy.api#documentation": "The timestamp at which the data product is created.
" } }, "createdBy": { "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "The Amazon DataZone user who created the asset type.
" + "smithy.api#documentation": "The user who created the data product.
" } }, - "updatedAt": { - "target": "com.amazonaws.datazone#UpdatedAt", + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", "traits": { - "smithy.api#documentation": "The timestamp of when the asset type was updated.
" + "smithy.api#documentation": "The timestamp at which the first revision of the data product is created.
" } }, - "updatedBy": { - "target": "com.amazonaws.datazone#UpdatedBy", + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", "traits": { - "smithy.api#documentation": "The Amazon DataZone user that updated the asset type.
" + "smithy.api#documentation": "The user who created the first revision of the data product.
" } } }, @@ -10683,27 +12256,167 @@ "smithy.api#documentation": "The timestamp of when this blueprint was upated.
", "smithy.api#timestampFormat": "date-time" } + }, + "provisioningConfigurations": { + "target": "com.amazonaws.datazone#ProvisioningConfigurationList", + "traits": { + "smithy.api#documentation": "The provisioning configuration of a blueprint.
", + "smithy.api#notProperty": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentBlueprintInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The identifier of the domain in which this blueprint exists.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "The ID of this Amazon DataZone blueprint.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentBlueprintOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "The ID of this Amazon DataZone blueprint.
", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintName", + "traits": { + "smithy.api#documentation": "The name of this Amazon DataZone blueprint.
", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "The description of this Amazon DataZone blueprint.
" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The provider of this Amazon DataZone blueprint.
", + "smithy.api#required": {} + } + }, + "provisioningProperties": { + "target": "com.amazonaws.datazone#ProvisioningProperties", + "traits": { + "smithy.api#documentation": "The provisioning properties of this Amazon DataZone blueprint.
", + "smithy.api#required": {} + } + }, + "deploymentProperties": { + "target": "com.amazonaws.datazone#DeploymentProperties", + "traits": { + "smithy.api#documentation": "The deployment properties of this Amazon DataZone blueprint.
" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", + "traits": { + "smithy.api#documentation": "The user parameters of this blueprint.
" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "The glossary terms attached to this Amazon DataZone blueprint.
" + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "A timestamp of when this blueprint was created.
", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp of when this blueprint was updated.
", + "smithy.api#timestampFormat": "date-time" + } } }, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.datazone#GetEnvironmentBlueprintInput": { + "com.amazonaws.datazone#GetEnvironmentCredentials": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetEnvironmentCredentialsInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetEnvironmentCredentialsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Gets the credentials of an environment in Amazon DataZone.
", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/credentials" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentCredentialsInput": { "type": "structure", "members": { "domainIdentifier": { "target": "com.amazonaws.datazone#DomainId", "traits": { - "smithy.api#documentation": "The identifier of the domain in which this blueprint exists.
", + "smithy.api#documentation": "The ID of the Amazon DataZone domain in which this environment and its credentials\n exist.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "identifier": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "The ID of this Amazon DataZone blueprint.
", + "smithy.api#documentation": "The ID of the environment whose credentials this operation gets.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -10713,78 +12426,38 @@ "smithy.api#input": {} } }, - "com.amazonaws.datazone#GetEnvironmentBlueprintOutput": { + "com.amazonaws.datazone#GetEnvironmentCredentialsOutput": { "type": "structure", "members": { - "id": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintId", - "traits": { - "smithy.api#documentation": "The ID of this Amazon DataZone blueprint.
", - "smithy.api#required": {} - } - }, - "name": { - "target": "com.amazonaws.datazone#EnvironmentBlueprintName", - "traits": { - "smithy.api#documentation": "The name of this Amazon DataZone blueprint.
", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.datazone#Description", - "traits": { - "smithy.api#documentation": "The description of this Amazon DataZone blueprint.
" - } - }, - "provider": { + "accessKeyId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The provider of this Amazon DataZone blueprint.
", - "smithy.api#required": {} - } - }, - "provisioningProperties": { - "target": "com.amazonaws.datazone#ProvisioningProperties", - "traits": { - "smithy.api#documentation": "The provisioning properties of this Amazon DataZone blueprint.
", - "smithy.api#required": {} - } - }, - "deploymentProperties": { - "target": "com.amazonaws.datazone#DeploymentProperties", - "traits": { - "smithy.api#documentation": "The deployment properties of this Amazon DataZone blueprint.
" - } - }, - "userParameters": { - "target": "com.amazonaws.datazone#CustomParameterList", - "traits": { - "smithy.api#documentation": "The user parameters of this blueprint.
" + "smithy.api#documentation": "The access key ID of the environment.
" } }, - "glossaryTerms": { - "target": "com.amazonaws.datazone#GlossaryTerms", + "secretAccessKey": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The glossary terms attached to this Amazon DataZone blueprint.
" + "smithy.api#documentation": "The secret access key of the environment credentials.
" } }, - "createdAt": { - "target": "smithy.api#Timestamp", + "sessionToken": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "A timestamp of when this blueprint was created.
", - "smithy.api#timestampFormat": "date-time" + "smithy.api#documentation": "The session token of the environment credentials.
" } }, - "updatedAt": { + "expiration": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "The timestamp of when this blueprint was updated.
", + "smithy.api#documentation": "The expiration timestamp of the environment credentials.
", "smithy.api#timestampFormat": "date-time" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#output": {}, + "smithy.api#sensitive": {} } }, "com.amazonaws.datazone#GetEnvironmentInput": { @@ -11884,7 +13557,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets a listing (a record of an asset at a given time).
", + "smithy.api#documentation": "Gets a listing (a record of an asset at a given time). If you specify a listing version,\n only details that are specific to that version are returned.
", "smithy.api#http": { "code": 200, "method": "GET", @@ -12421,6 +14094,9 @@ "subscriptionId": { "target": "com.amazonaws.datazone#SubscriptionId", "traits": { + "smithy.api#deprecated": { + "message": "Multiple subscriptions can exist for a single grant" + }, "smithy.api#documentation": "The identifier of the subscription.
" } } @@ -13579,6 +15255,50 @@ "smithy.api#documentation": "The details of a listing for which a subscription is to be granted.
" } }, + "com.amazonaws.datazone#GreaterThanExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the column.
", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The value that might be greater than an expression.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies whether the value is greater than an expression.
" + } + }, + "com.amazonaws.datazone#GreaterThanOrEqualToExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the column.
", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The value that might be greater than or equal to an expression.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies whether the value is greater than or equal to an expression.
" + } + }, "com.amazonaws.datazone#GroupDetails": { "type": "structure", "members": { @@ -13710,97 +15430,253 @@ } }, "traits": { - "smithy.api#documentation": "The details of an IAM user profile in Amazon DataZone.
" + "smithy.api#documentation": "The details of an IAM user profile in Amazon DataZone.
" + } + }, + "com.amazonaws.datazone#Import": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.datazone#FormTypeName", + "traits": { + "smithy.api#documentation": "The name of the import.
", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "The revision of the import.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The details of the import of the metadata form type.
" + } + }, + "com.amazonaws.datazone#ImportList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#Import" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.datazone#InExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the column.
", + "smithy.api#required": {} + } + }, + "values": { + "target": "com.amazonaws.datazone#StringList", + "traits": { + "smithy.api#documentation": "The values that might be in the expression.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies whether values are in the expression.
" + } + }, + "com.amazonaws.datazone#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.datazone#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The request has failed because of an unknown error, exception or failure.
", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.datazone#InventorySearchScope": { + "type": "enum", + "members": { + "ASSET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSET" + } + }, + "GLOSSARY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GLOSSARY" + } + }, + "GLOSSARY_TERM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GLOSSARY_TERM" + } + }, + "DATA_PRODUCT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATA_PRODUCT" + } + } + } + }, + "com.amazonaws.datazone#IsNotNullExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the column.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies that the expression is not null.
" + } + }, + "com.amazonaws.datazone#IsNullExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the column.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies that the expression is null.
" + } + }, + "com.amazonaws.datazone#ItemGlossaryTerms": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#GlossaryTermId" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2 + } + } + }, + "com.amazonaws.datazone#KmsKeyArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$" + } + }, + "com.amazonaws.datazone#LakeFormationConfiguration": { + "type": "structure", + "members": { + "locationRegistrationRole": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "The role that is used to manage read/write access to the chosen Amazon S3 bucket(s) for\n Data Lake using AWS Lake Formation hybrid access mode.
" + } + }, + "locationRegistrationExcludeS3Locations": { + "target": "com.amazonaws.datazone#S3LocationList", + "traits": { + "smithy.api#documentation": "Specifies certain Amazon S3 locations if you do not want Amazon DataZone to\n automatically register them in hybrid mode.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The Lake Formation configuration of the Data Lake blueprint.
" + } + }, + "com.amazonaws.datazone#LastName": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} } }, - "com.amazonaws.datazone#Import": { + "com.amazonaws.datazone#LessThanExpression": { "type": "structure", "members": { - "name": { - "target": "com.amazonaws.datazone#FormTypeName", + "columnName": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The name of the import.
", + "smithy.api#documentation": "The name of the column.
", "smithy.api#required": {} } }, - "revision": { - "target": "com.amazonaws.datazone#Revision", + "value": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The revision of the import.
", + "smithy.api#documentation": "The value that might be less than the expression.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The details of the import of the metadata form type.
" - } - }, - "com.amazonaws.datazone#ImportList": { - "type": "list", - "member": { - "target": "com.amazonaws.datazone#Import" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 10 - } + "smithy.api#documentation": "Specifies that a value is less than an expression.
" } }, - "com.amazonaws.datazone#InternalServerException": { + "com.amazonaws.datazone#LessThanOrEqualToExpression": { "type": "structure", "members": { - "message": { - "target": "com.amazonaws.datazone#ErrorMessage", + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the column.
", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", "traits": { + "smithy.api#documentation": "The value that might be less than or equal to an expression.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The request has failed because of an unknown error, exception or failure.
", - "smithy.api#error": "server", - "smithy.api#httpError": 500, - "smithy.api#retryable": {} + "smithy.api#documentation": "Specifies that a value is less than or equal to an expression.
" } }, - "com.amazonaws.datazone#InventorySearchScope": { - "type": "enum", + "com.amazonaws.datazone#LikeExpression": { + "type": "structure", "members": { - "ASSET": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ASSET" - } - }, - "GLOSSARY": { - "target": "smithy.api#Unit", + "columnName": { + "target": "smithy.api#String", "traits": { - "smithy.api#enumValue": "GLOSSARY" + "smithy.api#documentation": "The name of the column.
", + "smithy.api#required": {} } }, - "GLOSSARY_TERM": { - "target": "smithy.api#Unit", + "value": { + "target": "smithy.api#String", "traits": { - "smithy.api#enumValue": "GLOSSARY_TERM" + "smithy.api#documentation": "The value that might be like the expression.
", + "smithy.api#required": {} } } - } - }, - "com.amazonaws.datazone#KmsKeyArn": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 1024 - }, - "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$" - } - }, - "com.amazonaws.datazone#LastName": { - "type": "string", + }, "traits": { - "smithy.api#sensitive": {} + "smithy.api#documentation": "Specifies that a value is like the expression.
" } }, "com.amazonaws.datazone#LineageEvent": { @@ -14012,6 +15888,113 @@ "smithy.api#documentation": "The details of a data lineage node type.
" } }, + "com.amazonaws.datazone#ListAssetFilters": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListAssetFiltersInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListAssetFiltersOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Lists asset filters.
", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListAssetFiltersInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the domain where you want to list asset filters.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "assetIdentifier": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the data asset.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#FilterStatus", + "traits": { + "smithy.api#documentation": "The status of the asset filter.
", + "smithy.api#httpQuery": "status" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "When the number of asset filters is greater than the default value for the\n MaxResults
parameter, or if you explicitly specify a value for\n MaxResults
that is less than the number of asset filters, the response\n includes a pagination token named NextToken
. You can specify this\n NextToken
value in a subsequent call to ListAssetFilters
to\n list the next set of asset filters.
The maximum number of asset filters to return in a single call to\n ListAssetFilters
. When the number of asset filters to be listed is greater\n than the value of MaxResults
, the response contains a NextToken
\n value that you can use in a subsequent call to ListAssetFilters
to list the\n next set of asset filters.
The results of the ListAssetFilters
action.
When the number of asset filters is greater than the default value for the\n MaxResults
parameter, or if you explicitly specify a value for\n MaxResults
that is less than the number of asset filters, the response\n includes a pagination token named NextToken
. You can specify this\n NextToken
value in a subsequent call to ListAssetFilters
to\n list the next set of asset filters.
The maximum number of revisions to return in a single call to\n ListAssetRevisions
. When the number of revisions to be listed is greater\n than the value of MaxResults
, the response contains a NextToken
\n value that you can use in a subsequent call to ListAssetRevisions
to list the\n next set of revisions.
The maximum number of revisions to return in a single call to\n ListAssetRevisions
. When the number of revisions to be listed is greater\n than the value of MaxResults
, the response contains a NextToken
\n value that you can use in a subsequent call to ListAssetRevisions
to list the\n next set of revisions.
The results of the ListAssetRevisions
action.
When the number of revisions is greater than the default value for the\n MaxResults
parameter, or if you explicitly specify a value for\n MaxResults
that is less than the number of revisions, the response includes\n a pagination token named NextToken
. You can specify this\n NextToken
value in a subsequent call to ListAssetRevisions
to\n list the next set of revisions.
Lists data product revisions.
", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/data-products/{identifier}/revisions" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListDataProductRevisionsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the domain of the data product revisions that you want to list.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#DataProductId", + "traits": { + "smithy.api#documentation": "The ID of the data product revision.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of asset filters to return in a single call to\n ListDataProductRevisions
. When the number of data product revisions to be\n listed is greater than the value of MaxResults
, the response contains a\n NextToken
value that you can use in a subsequent call to\n ListDataProductRevisions
to list the next set of data product\n revisions.
When the number of data product revisions is greater than the default value for the\n MaxResults
parameter, or if you explicitly specify a value for\n MaxResults
that is less than the number of data product revisions, the\n response includes a pagination token named NextToken
. You can specify this\n NextToken
value in a subsequent call to\n ListDataProductRevisions
to list the next set of data product\n revisions.
The results of the ListAssetRevisions
action.
The results of the ListDataProductRevisions
action.
When the number of revisions is greater than the default value for the\n MaxResults
parameter, or if you explicitly specify a value for\n MaxResults
that is less than the number of revisions, the response includes\n a pagination token named NextToken
. You can specify this\n NextToken
value in a subsequent call to ListAssetRevisions
to\n list the next set of revisions.
When the number of data product revisions is greater than the default value for the\n MaxResults
parameter, or if you explicitly specify a value for\n MaxResults
that is less than the number of data product revisions, the\n response includes a pagination token named NextToken
. You can specify this\n NextToken
value in a subsequent call to\n ListDataProductRevisions
to list the next set of data product\n revisions.
The ID of the owning project of the subscription grants.
", + "smithy.api#httpQuery": "owningProjectId" + } + }, "sortBy": { "target": "com.amazonaws.datazone#SortKey", "traits": { @@ -15900,7 +17990,7 @@ "status": { "target": "com.amazonaws.datazone#SubscriptionRequestStatus", "traits": { - "smithy.api#documentation": "Specifies the status of the subscription requests.
", + "smithy.api#documentation": "Specifies the status of the subscription requests.
\nThis is not a required parameter, but if not specified, by default, Amazon DataZone\n returns only PENDING
subscription requests.
The status of the subscriptions that you want to list.
", + "smithy.api#documentation": "The status of the subscriptions that you want to list.
\nThis is not a required parameter, but if not provided, by default, Amazon DataZone\n returns only APPROVED
subscriptions.
An asset published in an Amazon DataZone catalog.
" } + }, + "dataProductListing": { + "target": "com.amazonaws.datazone#DataProductListing", + "traits": { + "smithy.api#documentation": "The data product listing.
" + } } }, "traits": { @@ -16569,6 +18665,70 @@ } } }, + "com.amazonaws.datazone#ListingSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#ListingSummary" + } + }, + "com.amazonaws.datazone#ListingSummary": { + "type": "structure", + "members": { + "listingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "The ID of the data product listing.
" + } + }, + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "The revision of the data product listing.
" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", + "traits": { + "smithy.api#documentation": "The glossary terms of the data product.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The summary of the listing of the data product.
" + } + }, + "com.amazonaws.datazone#ListingSummaryItem": { + "type": "structure", + "members": { + "listingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "The ID of the data product listing.
" + } + }, + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "The revision of the data product listing.
" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", + "traits": { + "smithy.api#documentation": "The glossary terms of the data product listing.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The results of the data product summary.
" + } + }, + "com.amazonaws.datazone#ListingSummaryItems": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#ListingSummaryItem" + } + }, "com.amazonaws.datazone#LongDescription": { "type": "string", "traits": { @@ -16887,6 +19047,72 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.datazone#NotEqualToExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the column.
", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The value that might not be equal to the expression.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies that a value is not equal to the expression.
" + } + }, + "com.amazonaws.datazone#NotInExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the column.
", + "smithy.api#required": {} + } + }, + "values": { + "target": "com.amazonaws.datazone#StringList", + "traits": { + "smithy.api#documentation": "The value that might not be in the expression.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies that a value is not in the expression.
" + } + }, + "com.amazonaws.datazone#NotLikeExpression": { + "type": "structure", + "members": { + "columnName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the column.
", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The value that might not be like the expression.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies that a value might be not like the expression.
" + } + }, "com.amazonaws.datazone#NotificationOutput": { "type": "structure", "members": { @@ -17463,6 +19689,26 @@ "smithy.api#documentation": "The details of a Amazon DataZone project.
" } }, + "com.amazonaws.datazone#ProvisioningConfiguration": { + "type": "union", + "members": { + "lakeFormationConfiguration": { + "target": "com.amazonaws.datazone#LakeFormationConfiguration", + "traits": { + "smithy.api#documentation": "The Lake Formation configuration of the Data Lake blueprint.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The provisioning configuration of the blueprint.
" + } + }, + "com.amazonaws.datazone#ProvisioningConfigurationList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#ProvisioningConfiguration" + } + }, "com.amazonaws.datazone#ProvisioningProperties": { "type": "union", "members": { @@ -17558,6 +19804,13 @@ "traits": { "smithy.api#documentation": "The regional parameters in the environment blueprint.
" } + }, + "provisioningConfigurations": { + "target": "com.amazonaws.datazone#ProvisioningConfigurationList", + "traits": { + "smithy.api#documentation": "The provisioning configuration of a blueprint.
", + "smithy.api#notProperty": {} + } } }, "traits": { @@ -17618,6 +19871,13 @@ "smithy.api#documentation": "The timestamp of when the environment blueprint was updated.
", "smithy.api#timestampFormat": "date-time" } + }, + "provisioningConfigurations": { + "target": "com.amazonaws.datazone#ProvisioningConfigurationList", + "traits": { + "smithy.api#documentation": "The provisioning configuration of a blueprint.
", + "smithy.api#notProperty": {} + } } }, "traits": { @@ -18384,89 +20644,223 @@ "com.amazonaws.datazone#RevokeSubscriptionOutput": { "type": "structure", "members": { - "id": { - "target": "com.amazonaws.datazone#SubscriptionId", + "id": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "The identifier of the revoked subscription.
", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "The identifier of the user who revoked the subscription.
", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "The Amazon DataZone user who revoked the subscription.
" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The identifier of the Amazon DataZone domain where you want to revoke a subscription.
", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionStatus", + "traits": { + "smithy.api#documentation": "The status of the revoked subscription.
", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp of when the subscription was revoked.
", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "The timestamp of when the subscription was revoked.
", + "smithy.api#required": {} + } + }, + "subscribedPrincipal": { + "target": "com.amazonaws.datazone#SubscribedPrincipal", + "traits": { + "smithy.api#documentation": "The subscribed principal of the revoked subscription.
", + "smithy.api#required": {} + } + }, + "subscribedListing": { + "target": "com.amazonaws.datazone#SubscribedListing", + "traits": { + "smithy.api#documentation": "The subscribed listing of the revoked subscription.
", + "smithy.api#required": {} + } + }, + "subscriptionRequestId": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "The identifier of the subscription request for the revoked subscription.
" + } + }, + "retainPermissions": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Specifies whether permissions are retained when the subscription is revoked.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#RoleArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$" + } + }, + "com.amazonaws.datazone#RowFilter": { + "type": "union", + "members": { + "expression": { + "target": "com.amazonaws.datazone#RowFilterExpression", + "traits": { + "smithy.api#documentation": "The expression of the row filter.
" + } + }, + "and": { + "target": "com.amazonaws.datazone#RowFilterList", + "traits": { + "smithy.api#documentation": "The 'and' clause of the row filter.
" + } + }, + "or": { + "target": "com.amazonaws.datazone#RowFilterList", + "traits": { + "smithy.api#documentation": "The 'or' clause of the row filter.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The row filter.
" + } + }, + "com.amazonaws.datazone#RowFilterConfiguration": { + "type": "structure", + "members": { + "rowFilter": { + "target": "com.amazonaws.datazone#RowFilter", + "traits": { + "smithy.api#documentation": "The row filter.
", + "smithy.api#required": {} + } + }, + "sensitive": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#default": true, + "smithy.api#documentation": "Specifies whether the row filter is sensitive.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The row filter configuration details.
" + } + }, + "com.amazonaws.datazone#RowFilterExpression": { + "type": "union", + "members": { + "equalTo": { + "target": "com.amazonaws.datazone#EqualToExpression", "traits": { - "smithy.api#documentation": "The identifier of the revoked subscription.
", - "smithy.api#required": {} + "smithy.api#documentation": "The 'equal to' clause of the row filter expression.
" } }, - "createdBy": { - "target": "com.amazonaws.datazone#CreatedBy", + "notEqualTo": { + "target": "com.amazonaws.datazone#NotEqualToExpression", "traits": { - "smithy.api#documentation": "The identifier of the user who revoked the subscription.
", - "smithy.api#required": {} + "smithy.api#documentation": "The 'no equal to' clause of the row filter expression.
" } }, - "updatedBy": { - "target": "com.amazonaws.datazone#UpdatedBy", + "greaterThan": { + "target": "com.amazonaws.datazone#GreaterThanExpression", "traits": { - "smithy.api#documentation": "The Amazon DataZone user who revoked the subscription.
" + "smithy.api#documentation": "The 'greater than' clause of the row filter expression.
" } }, - "domainId": { - "target": "com.amazonaws.datazone#DomainId", + "lessThan": { + "target": "com.amazonaws.datazone#LessThanExpression", "traits": { - "smithy.api#documentation": "The identifier of the Amazon DataZone domain where you want to revoke a subscription.
", - "smithy.api#required": {} + "smithy.api#documentation": "The 'less than' clause of the row filter expression.
" } }, - "status": { - "target": "com.amazonaws.datazone#SubscriptionStatus", + "greaterThanOrEqualTo": { + "target": "com.amazonaws.datazone#GreaterThanOrEqualToExpression", "traits": { - "smithy.api#documentation": "The status of the revoked subscription.
", - "smithy.api#required": {} + "smithy.api#documentation": "The 'greater than or equal to' clause of the filter expression.
" } }, - "createdAt": { - "target": "com.amazonaws.datazone#CreatedAt", + "lessThanOrEqualTo": { + "target": "com.amazonaws.datazone#LessThanOrEqualToExpression", "traits": { - "smithy.api#documentation": "The timestamp of when the subscription was revoked.
", - "smithy.api#required": {} + "smithy.api#documentation": "The 'less than or equal to' clause of the row filter expression.
" } }, - "updatedAt": { - "target": "com.amazonaws.datazone#UpdatedAt", + "isNull": { + "target": "com.amazonaws.datazone#IsNullExpression", "traits": { - "smithy.api#documentation": "The timestamp of when the subscription was revoked.
", - "smithy.api#required": {} + "smithy.api#documentation": "The 'is null' clause of the row filter expression.
" } }, - "subscribedPrincipal": { - "target": "com.amazonaws.datazone#SubscribedPrincipal", + "isNotNull": { + "target": "com.amazonaws.datazone#IsNotNullExpression", "traits": { - "smithy.api#documentation": "The subscribed principal of the revoked subscription.
", - "smithy.api#required": {} + "smithy.api#documentation": "The 'is not null' clause of the row filter expression.
" } }, - "subscribedListing": { - "target": "com.amazonaws.datazone#SubscribedListing", + "in": { + "target": "com.amazonaws.datazone#InExpression", "traits": { - "smithy.api#documentation": "The subscribed listing of the revoked subscription.
", - "smithy.api#required": {} + "smithy.api#documentation": "The 'in' clause of the row filter expression.
" } }, - "subscriptionRequestId": { - "target": "com.amazonaws.datazone#SubscriptionRequestId", + "notIn": { + "target": "com.amazonaws.datazone#NotInExpression", "traits": { - "smithy.api#documentation": "The identifier of the subscription request for the revoked subscription.
" + "smithy.api#documentation": "The 'not in' clause of the row filter expression.
" } }, - "retainPermissions": { - "target": "smithy.api#Boolean", + "like": { + "target": "com.amazonaws.datazone#LikeExpression", "traits": { - "smithy.api#documentation": "Specifies whether permissions are retained when the subscription is revoked.
" + "smithy.api#documentation": "The 'like' clause of the row filter expression.
" + } + }, + "notLike": { + "target": "com.amazonaws.datazone#NotLikeExpression", + "traits": { + "smithy.api#documentation": "The 'not like' clause of the row filter expression.
" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "The row filter expression.
" } }, - "com.amazonaws.datazone#RoleArn": { - "type": "string", - "traits": { - "smithy.api#pattern": "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$" + "com.amazonaws.datazone#RowFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#RowFilter" } }, "com.amazonaws.datazone#RunStatisticsForAssets": { @@ -18507,6 +20901,28 @@ "smithy.api#documentation": "The asset statistics from the data source run.
" } }, + "com.amazonaws.datazone#S3Location": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^s3://.+$" + } + }, + "com.amazonaws.datazone#S3LocationList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#S3Location" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, "com.amazonaws.datazone#ScheduleConfiguration": { "type": "structure", "members": { @@ -18786,12 +21202,9 @@ } }, "dataProductItem": { - "target": "com.amazonaws.datazone#DataProductSummary", + "target": "com.amazonaws.datazone#DataProductResultItem", "traits": { - "smithy.api#deprecated": { - "message": "This field is deprecated." - }, - "smithy.api#documentation": "The data product item included in the search results.
" + "smithy.api#documentation": "The data product.
" } } }, @@ -18986,6 +21399,12 @@ "traits": { "smithy.api#documentation": "The asset listing included in the results of the SearchListings
\n action.
The data product listing.
" + } } }, "traits": { @@ -19833,6 +22252,12 @@ "smithy.api#output": {} } }, + "com.amazonaws.datazone#StringList": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, "com.amazonaws.datazone#SubscribedAsset": { "type": "structure", "members": { @@ -20020,6 +22445,12 @@ "traits": { "smithy.api#documentation": "The asset for which the subscription grant is created.
" } + }, + "productListing": { + "target": "com.amazonaws.datazone#SubscribedProductListing", + "traits": { + "smithy.api#documentation": "The data product listing.
" + } } }, "traits": { @@ -20078,6 +22509,50 @@ "target": "com.amazonaws.datazone#SubscribedPrincipal" } }, + "com.amazonaws.datazone#SubscribedProductListing": { + "type": "structure", + "members": { + "entityId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the data product listing.
" + } + }, + "entityRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "The revision of the data product listing.
" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", + "traits": { + "smithy.api#documentation": "The glossary terms of the data product listing.
" + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the data product listing.
" + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The description of the data product listing.
" + } + }, + "assetListings": { + "target": "com.amazonaws.datazone#AssetInDataProductListingItems", + "traits": { + "smithy.api#documentation": "The data assets of the data product listing.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The data product listing.
" + } + }, "com.amazonaws.datazone#SubscribedProject": { "type": "structure", "members": { @@ -20292,7 +22767,10 @@ "subscriptionId": { "target": "com.amazonaws.datazone#SubscriptionId", "traits": { - "smithy.api#documentation": "The ID of the subscription grant.
" + "smithy.api#deprecated": { + "message": "Multiple subscriptions can exist for a single grant" + }, + "smithy.api#documentation": "The ID of the subscription.
" } } }, @@ -21600,6 +24078,173 @@ "smithy.api#output": {} } }, + "com.amazonaws.datazone#UpdateAssetFilter": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateAssetFilterInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateAssetFilterOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Updates an asset filter.
", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#UpdateAssetFilterInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the domain where you want to update an asset filter.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "assetIdentifier": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the data asset.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "The ID of the asset filter.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the asset filter.
" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "The description of the asset filter.
" + } + }, + "configuration": { + "target": "com.amazonaws.datazone#AssetFilterConfiguration", + "traits": { + "smithy.api#documentation": "The configuration of the asset filter.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateAssetFilterOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#FilterId", + "traits": { + "smithy.api#documentation": "The ID of the asset filter.
", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "The ID of the domain where the asset filter was created.
", + "smithy.api#required": {} + } + }, + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the data asset.
", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FilterName", + "traits": { + "smithy.api#documentation": "The name of the asset filter.
", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "The description of the asset filter.
" + } + }, + "status": { + "target": "com.amazonaws.datazone#FilterStatus", + "traits": { + "smithy.api#documentation": "The status of the asset filter.
" + } + }, + "configuration": { + "target": "com.amazonaws.datazone#AssetFilterConfiguration", + "traits": { + "smithy.api#documentation": "The configuration of the asset filter.
", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "The timestamp at which the asset filter was created.
" + } + }, + "errorMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The error message that is displayed if the action is not completed successfully.
" + } + }, + "effectiveColumnNames": { + "target": "com.amazonaws.datazone#ColumnNameList", + "traits": { + "smithy.api#documentation": "The column names of the asset filter.
" + } + }, + "effectiveRowFilter": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The row filter of the asset filter.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#UpdateDataSource": { "type": "operation", "input": { @@ -23254,6 +25899,9 @@ "subscriptionId": { "target": "com.amazonaws.datazone#SubscriptionId", "traits": { + "smithy.api#deprecated": { + "message": "Multiple subscriptions can exist for a single grant" + }, "smithy.api#documentation": "The identifier of the subscription.
" } } diff --git a/models/docdb.json b/models/docdb.json index 5dd7131697..bad56a44ce 100644 --- a/models/docdb.json +++ b/models/docdb.json @@ -245,6 +245,9 @@ { "target": "com.amazonaws.docdb#FailoverDBCluster" }, + { + "target": "com.amazonaws.docdb#FailoverGlobalCluster" + }, { "target": "com.amazonaws.docdb#ListTagsForResource" }, @@ -2812,6 +2815,16 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.docdb#DBClusterIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[A-Za-z][0-9A-Za-z-:._]*$" + } + }, "com.amazonaws.docdb#DBClusterList": { "type": "list", "member": { @@ -5765,6 +5778,79 @@ "smithy.api#output": {} } }, + "com.amazonaws.docdb#FailoverGlobalCluster": { + "type": "operation", + "input": { + "target": "com.amazonaws.docdb#FailoverGlobalClusterMessage" + }, + "output": { + "target": "com.amazonaws.docdb#FailoverGlobalClusterResult" + }, + "errors": [ + { + "target": "com.amazonaws.docdb#DBClusterNotFoundFault" + }, + { + "target": "com.amazonaws.docdb#GlobalClusterNotFoundFault" + }, + { + "target": "com.amazonaws.docdb#InvalidDBClusterStateFault" + }, + { + "target": "com.amazonaws.docdb#InvalidGlobalClusterStateFault" + } + ], + "traits": { + "smithy.api#documentation": "Promotes the specified secondary DB cluster to be the primary DB cluster in the global cluster when failing over a global cluster occurs.
\nUse this operation to respond to an unplanned event, such as a regional disaster in the primary region. \n Failing over can result in a loss of write transaction data that wasn't replicated to the chosen secondary before the failover event occurred. \n However, the recovery process that promotes a DB instance on the chosen secondary DB cluster to be the primary writer DB instance guarantees that the data is in a transactionally consistent state.
" + } + }, + "com.amazonaws.docdb#FailoverGlobalClusterMessage": { + "type": "structure", + "members": { + "GlobalClusterIdentifier": { + "target": "com.amazonaws.docdb#GlobalClusterIdentifier", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The identifier of the Amazon DocumentDB global cluster to apply this operation. \n The identifier is the unique key assigned by the user when the cluster is created. \n In other words, it's the name of the global cluster.
\nConstraints:
\nMust match the identifier of an existing global cluster.
\nMinimum length of 1. Maximum length of 255.
\nPattern: [A-Za-z][0-9A-Za-z-:._]*
\n
The identifier of the secondary Amazon DocumentDB cluster that you want to promote to the primary for the global cluster. \n Use the Amazon Resource Name (ARN) for the identifier so that Amazon DocumentDB can locate the cluster in its Amazon Web Services region.
\nConstraints:
\nMust match the identifier of an existing secondary cluster.
\nMinimum length of 1. Maximum length of 255.
\nPattern: [A-Za-z][0-9A-Za-z-:._]*
\n
Specifies whether to allow data loss for this global cluster operation. Allowing data loss triggers a global failover operation.
\nIf you don't specify AllowDataLoss
, the global cluster operation defaults to a switchover.
Constraints:
\nCan't be specified together with the Switchover
parameter.
Specifies whether to switch over this global database cluster.
\nConstraints:
\nCan't be specified together with the AllowDataLoss
parameter.
The identifier of the secondary Amazon DocumentDB cluster to promote to the new primary for the global database cluster. \n Use the Amazon Resource Name (ARN) for the identifier so that Amazon DocumentDB can locate the cluster in its Amazon Web Services region.
\nConstraints:
\nMust match the identifier of an existing secondary cluster.
\nMinimum length of 1. Maximum length of 255.
\nPattern: [A-Za-z][0-9A-Za-z-:._]*
\n
This operation allows you to perform batch reads or writes on data stored in DynamoDB,\n using PartiQL. Each read statement in a BatchExecuteStatement
must specify\n an equality condition on all key attributes. This enforces that each SELECT
\n statement in a batch returns at most a single item.
The entire batch must consist of either read statements or write statements, you\n cannot mix both in one batch.
\nA HTTP 200 response does not mean that all statements in the BatchExecuteStatement\n succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse
for each\n statement.
This operation allows you to perform batch reads or writes on data stored in DynamoDB,\n using PartiQL. Each read statement in a BatchExecuteStatement
must specify\n an equality condition on all key attributes. This enforces that each SELECT
\n statement in a batch returns at most a single item. For more information, see Running batch operations with PartiQL for DynamoDB\n .
The entire batch must consist of either read statements or write statements, you\n cannot mix both in one batch.
\nA HTTP 200 response does not mean that all statements in the BatchExecuteStatement\n succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse
for each\n statement.
The response to each PartiQL statement in the batch. The values of the list are \n ordered according to the ordering of the request statements.
" + "smithy.api#documentation": "The response to each PartiQL statement in the batch. The values of the list are\n ordered according to the ordering of the request statements.
" } }, "ConsumedCapacity": { @@ -850,7 +850,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "The BatchGetItem
operation returns the attributes of one or more items\n from one or more tables. You identify requested items by primary key.
A single operation can retrieve up to 16 MB of data, which can contain as many as 100\n items. BatchGetItem
returns a partial result if the response size limit is\n exceeded, the table's provisioned throughput is exceeded, more than 1MB per partition is requested,\n or an internal processing failure occurs. If a partial result is returned, the operation returns a value for\n UnprocessedKeys
. You can use this value to retry the operation starting\n with the next item to get.
If you request more than 100 items, BatchGetItem
returns a\n ValidationException
with the message \"Too many items requested for\n the BatchGetItem call.\"
For example, if you ask to retrieve 100 items, but each individual item is 300 KB in\n size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns\n an appropriate UnprocessedKeys
value so you can get the next page of\n results. If desired, your application can include its own logic to assemble the pages of\n results into one dataset.
If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchGetItem
returns a\n ProvisionedThroughputExceededException
. If at least\n one of the items is successfully processed, then\n BatchGetItem
completes successfully, while returning the keys of the\n unread items in UnprocessedKeys
.
If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.
\nFor more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.
\nBy default, BatchGetItem
performs eventually consistent reads on every\n table in the request. If you want strongly consistent reads instead, you can set\n ConsistentRead
to true
for any or all tables.
In order to minimize response latency, BatchGetItem
may retrieve items in\n parallel.
When designing your application, keep in mind that DynamoDB does not return items in\n any particular order. To help parse the response by item, include the primary key values\n for the items in your request in the ProjectionExpression
parameter.
If a requested item does not exist, it is not returned in the result. Requests for\n nonexistent items consume the minimum read capacity units according to the type of read.\n For more information, see Working with Tables in the Amazon DynamoDB Developer\n Guide.
", + "smithy.api#documentation": "The BatchGetItem
operation returns the attributes of one or more items\n from one or more tables. You identify requested items by primary key.
A single operation can retrieve up to 16 MB of data, which can contain as many as 100\n items. BatchGetItem
returns a partial result if the response size limit is\n exceeded, the table's provisioned throughput is exceeded, more than 1MB per partition is\n requested, or an internal processing failure occurs. If a partial result is returned,\n the operation returns a value for UnprocessedKeys
. You can use this value\n to retry the operation starting with the next item to get.
If you request more than 100 items, BatchGetItem
returns a\n ValidationException
with the message \"Too many items requested for\n the BatchGetItem call.\"
For example, if you ask to retrieve 100 items, but each individual item is 300 KB in\n size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns\n an appropriate UnprocessedKeys
value so you can get the next page of\n results. If desired, your application can include its own logic to assemble the pages of\n results into one dataset.
If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchGetItem
returns a\n ProvisionedThroughputExceededException
. If at least\n one of the items is successfully processed, then\n BatchGetItem
completes successfully, while returning the keys of the\n unread items in UnprocessedKeys
.
If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.
\nFor more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.
\nBy default, BatchGetItem
performs eventually consistent reads on every\n table in the request. If you want strongly consistent reads instead, you can set\n ConsistentRead
to true
for any or all tables.
In order to minimize response latency, BatchGetItem
may retrieve items in\n parallel.
When designing your application, keep in mind that DynamoDB does not return items in\n any particular order. To help parse the response by item, include the primary key values\n for the items in your request in the ProjectionExpression
parameter.
If a requested item does not exist, it is not returned in the result. Requests for\n nonexistent items consume the minimum read capacity units according to the type of read.\n For more information, see Working with Tables in the Amazon DynamoDB Developer\n Guide.
", "smithy.api#examples": [ { "title": "To retrieve multiple items from a table", @@ -919,7 +919,7 @@ "RequestItems": { "target": "com.amazonaws.dynamodb#BatchGetRequestMap", "traits": { - "smithy.api#documentation": "A map of one or more table names or table ARNs and, for each table, a map that describes one or more\n items to retrieve from that table. Each table name or ARN can be used only once per\n BatchGetItem
request.
Each element in the map of items to retrieve consists of the following:
\n\n ConsistentRead
- If true
, a strongly consistent read\n is used; if false
(the default), an eventually consistent read is\n used.
\n ExpressionAttributeNames
- One or more substitution tokens for\n attribute names in the ProjectionExpression
parameter. The\n following are some use cases for using\n ExpressionAttributeNames
:
To access an attribute whose name conflicts with a DynamoDB reserved\n word.
\nTo create a placeholder for repeating occurrences of an attribute name\n in an expression.
\nTo prevent special characters in an attribute name from being\n misinterpreted in an expression.
\nUse the # character in an expression to\n dereference an attribute name. For example, consider the following attribute\n name:
\n\n Percentile
\n
The name of this attribute conflicts with a reserved word, so it cannot be\n used directly in an expression. (For the complete list of reserved words, see\n Reserved\n Words in the Amazon DynamoDB Developer Guide).\n To work around this, you could specify the following for\n ExpressionAttributeNames
:
\n {\"#P\":\"Percentile\"}
\n
You could then use this substitution in an expression, as in this\n example:
\n\n #P = :val
\n
Tokens that begin with the : character\n are expression attribute values, which are placeholders\n for the actual value at runtime.
\nFor more information about expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB\n Developer Guide.
\n\n Keys
- An array of primary key attribute values that define\n specific items in the table. For each primary key, you must provide\n all of the key attributes. For example, with a simple\n primary key, you only need to provide the partition key value. For a composite\n key, you must provide both the partition key value and the\n sort key value.
\n ProjectionExpression
- A string that identifies one or more\n attributes to retrieve from the table. These attributes can include scalars,\n sets, or elements of a JSON document. The attributes in the expression must be\n separated by commas.
If no attribute names are specified, then all attributes are returned. If any\n of the requested attributes are not found, they do not appear in the\n result.
\nFor more information, see Accessing Item Attributes in the Amazon DynamoDB\n Developer Guide.
\n\n AttributesToGet
- This is a legacy parameter. Use\n ProjectionExpression
instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer\n Guide.
A map of one or more table names or table ARNs and, for each table, a map that\n describes one or more items to retrieve from that table. Each table name or ARN can be\n used only once per BatchGetItem
request.
Each element in the map of items to retrieve consists of the following:
\n\n ConsistentRead
- If true
, a strongly consistent read\n is used; if false
(the default), an eventually consistent read is\n used.
\n ExpressionAttributeNames
- One or more substitution tokens for\n attribute names in the ProjectionExpression
parameter. The\n following are some use cases for using\n ExpressionAttributeNames
:
To access an attribute whose name conflicts with a DynamoDB reserved\n word.
\nTo create a placeholder for repeating occurrences of an attribute name\n in an expression.
\nTo prevent special characters in an attribute name from being\n misinterpreted in an expression.
\nUse the # character in an expression to\n dereference an attribute name. For example, consider the following attribute\n name:
\n\n Percentile
\n
The name of this attribute conflicts with a reserved word, so it cannot be\n used directly in an expression. (For the complete list of reserved words, see\n Reserved\n Words in the Amazon DynamoDB Developer Guide).\n To work around this, you could specify the following for\n ExpressionAttributeNames
:
\n {\"#P\":\"Percentile\"}
\n
You could then use this substitution in an expression, as in this\n example:
\n\n #P = :val
\n
Tokens that begin with the : character\n are expression attribute values, which are placeholders\n for the actual value at runtime.
\nFor more information about expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB\n Developer Guide.
\n\n Keys
- An array of primary key attribute values that define\n specific items in the table. For each primary key, you must provide\n all of the key attributes. For example, with a simple\n primary key, you only need to provide the partition key value. For a composite\n key, you must provide both the partition key value and the\n sort key value.
\n ProjectionExpression
- A string that identifies one or more\n attributes to retrieve from the table. These attributes can include scalars,\n sets, or elements of a JSON document. The attributes in the expression must be\n separated by commas.
If no attribute names are specified, then all attributes are returned. If any\n of the requested attributes are not found, they do not appear in the\n result.
\nFor more information, see Accessing Item Attributes in the Amazon DynamoDB\n Developer Guide.
\n\n AttributesToGet
- This is a legacy parameter. Use\n ProjectionExpression
instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer\n Guide.
A map of table name or table ARN to a list of items. Each object in Responses
consists\n of a table name or ARN, along with a map of attribute data consisting of the data type and\n attribute value.
A map of table name or table ARN to a list of items. Each object in\n Responses
consists of a table name or ARN, along with a map of\n attribute data consisting of the data type and attribute value.
The BatchWriteItem
operation puts or deletes multiple items in one or\n more tables. A single call to BatchWriteItem
can transmit up to 16MB of\n data over the network, consisting of up to 25 item put or delete operations. While\n individual items can be up to 400 KB once stored, it's important to note that an item's\n representation might be greater than 400KB while being sent in DynamoDB's JSON format\n for the API call. For more details on this distinction, see Naming Rules and Data Types.
\n BatchWriteItem
cannot update items. If you perform a BatchWriteItem
\n operation on an existing item, that item's values will be overwritten by the\n operation and it will appear like it was updated. To update items, we recommend you\n use the UpdateItem
action.
The individual PutItem
and DeleteItem
operations specified\n in BatchWriteItem
are atomic; however BatchWriteItem
as a\n whole is not. If any requested operations fail because the table's provisioned\n throughput is exceeded or an internal processing failure occurs, the failed operations\n are returned in the UnprocessedItems
response parameter. You can\n investigate and optionally resend the requests. Typically, you would call\n BatchWriteItem
in a loop. Each iteration would check for unprocessed\n items and submit a new BatchWriteItem
request with those unprocessed items\n until all items have been processed.
If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchWriteItem
returns a\n ProvisionedThroughputExceededException
.
If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.
\nFor more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.
\nWith BatchWriteItem
, you can efficiently write or delete large amounts of\n data, such as from Amazon EMR, or copy data from another database into DynamoDB. In\n order to improve performance with these large-scale operations,\n BatchWriteItem
does not behave in the same way as individual\n PutItem
and DeleteItem
calls would. For example, you\n cannot specify conditions on individual put and delete requests, and\n BatchWriteItem
does not return deleted items in the response.
If you use a programming language that supports concurrency, you can use threads to\n write items in parallel. Your application must include the necessary logic to manage the\n threads. With languages that don't support threading, you must update or delete the\n specified items one at a time. In both situations, BatchWriteItem
performs\n the specified put and delete operations in parallel, giving you the power of the thread\n pool approach without having to introduce complexity into your application.
Parallel processing reduces latency, but each specified put and delete request\n consumes the same number of write capacity units whether it is processed in parallel or\n not. Delete operations on nonexistent items consume one write capacity unit.
\nIf one or more of the following is true, DynamoDB rejects the entire batch write\n operation:
\nOne or more tables specified in the BatchWriteItem
request does\n not exist.
Primary key attributes specified on an item in the request do not match those\n in the corresponding table's primary key schema.
\nYou try to perform multiple operations on the same item in the same\n BatchWriteItem
request. For example, you cannot put and delete\n the same item in the same BatchWriteItem
request.
Your request contains at least two items with identical hash and range keys\n (which essentially is two put operations).
\nThere are more than 25 requests in the batch.
\nAny individual item in a batch exceeds 400 KB.
\nThe total request size exceeds 16 MB.
\nAny individual items with keys exceeding the key length limits. For a\n partition key, the limit is 2048 bytes and for a sort key, the limit is 1024\n bytes.
\nThe BatchWriteItem
operation puts or deletes multiple items in one or\n more tables. A single call to BatchWriteItem
can transmit up to 16MB of\n data over the network, consisting of up to 25 item put or delete operations. While\n individual items can be up to 400 KB once stored, it's important to note that an item's\n representation might be greater than 400KB while being sent in DynamoDB's JSON format\n for the API call. For more details on this distinction, see Naming Rules and Data Types.
\n BatchWriteItem
cannot update items. If you perform a\n BatchWriteItem
operation on an existing item, that item's values\n will be overwritten by the operation and it will appear like it was updated. To\n update items, we recommend you use the UpdateItem
action.
The individual PutItem
and DeleteItem
operations specified\n in BatchWriteItem
are atomic; however BatchWriteItem
as a\n whole is not. If any requested operations fail because the table's provisioned\n throughput is exceeded or an internal processing failure occurs, the failed operations\n are returned in the UnprocessedItems
response parameter. You can\n investigate and optionally resend the requests. Typically, you would call\n BatchWriteItem
in a loop. Each iteration would check for unprocessed\n items and submit a new BatchWriteItem
request with those unprocessed items\n until all items have been processed.
For tables and indexes with provisioned capacity, if none of the items can be\n processed due to insufficient provisioned throughput on all of the tables in the\n request, then BatchWriteItem
returns a\n ProvisionedThroughputExceededException
. For all tables and indexes, if\n none of the items can be processed due to other throttling scenarios (such as exceeding\n partition level limits), then BatchWriteItem
returns a\n ThrottlingException
.
If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.
\nFor more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.
\nWith BatchWriteItem
, you can efficiently write or delete large amounts of\n data, such as from Amazon EMR, or copy data from another database into DynamoDB. In\n order to improve performance with these large-scale operations,\n BatchWriteItem
does not behave in the same way as individual\n PutItem
and DeleteItem
calls would. For example, you\n cannot specify conditions on individual put and delete requests, and\n BatchWriteItem
does not return deleted items in the response.
If you use a programming language that supports concurrency, you can use threads to\n write items in parallel. Your application must include the necessary logic to manage the\n threads. With languages that don't support threading, you must update or delete the\n specified items one at a time. In both situations, BatchWriteItem
performs\n the specified put and delete operations in parallel, giving you the power of the thread\n pool approach without having to introduce complexity into your application.
Parallel processing reduces latency, but each specified put and delete request\n consumes the same number of write capacity units whether it is processed in parallel or\n not. Delete operations on nonexistent items consume one write capacity unit.
\nIf one or more of the following is true, DynamoDB rejects the entire batch write\n operation:
\nOne or more tables specified in the BatchWriteItem
request does\n not exist.
Primary key attributes specified on an item in the request do not match those\n in the corresponding table's primary key schema.
\nYou try to perform multiple operations on the same item in the same\n BatchWriteItem
request. For example, you cannot put and delete\n the same item in the same BatchWriteItem
request.
Your request contains at least two items with identical hash and range keys\n (which essentially is two put operations).
\nThere are more than 25 requests in the batch.
\nAny individual item in a batch exceeds 400 KB.
\nThe total request size exceeds 16 MB.
\nAny individual items with keys exceeding the key length limits. For a\n partition key, the limit is 2048 bytes and for a sort key, the limit is 1024\n bytes.
\nA map of one or more table names or table ARNs and, for each table, a list of operations to be\n performed (DeleteRequest
or PutRequest
). Each element in the\n map consists of the following:
\n DeleteRequest
- Perform a DeleteItem
operation on the\n specified item. The item to be deleted is identified by a Key
\n subelement:
\n Key
- A map of primary key attribute values that uniquely\n identify the item. Each entry in this map consists of an attribute name\n and an attribute value. For each primary key, you must provide\n all of the key attributes. For example, with a\n simple primary key, you only need to provide a value for the partition\n key. For a composite primary key, you must provide values for\n both the partition key and the sort key.
\n PutRequest
- Perform a PutItem
operation on the\n specified item. The item to be put is identified by an Item
\n subelement:
\n Item
- A map of attributes and their values. Each entry in\n this map consists of an attribute name and an attribute value. Attribute\n values must not be null; string and binary type attributes must have\n lengths greater than zero; and set type attributes must not be empty.\n Requests that contain empty values are rejected with a\n ValidationException
exception.
If you specify any attributes that are part of an index key, then the\n data types for those attributes must match those of the schema in the\n table's attribute definition.
\nA map of one or more table names or table ARNs and, for each table, a list of\n operations to be performed (DeleteRequest
or PutRequest
). Each\n element in the map consists of the following:
\n DeleteRequest
- Perform a DeleteItem
operation on the\n specified item. The item to be deleted is identified by a Key
\n subelement:
\n Key
- A map of primary key attribute values that uniquely\n identify the item. Each entry in this map consists of an attribute name\n and an attribute value. For each primary key, you must provide\n all of the key attributes. For example, with a\n simple primary key, you only need to provide a value for the partition\n key. For a composite primary key, you must provide values for\n both the partition key and the sort key.
\n PutRequest
- Perform a PutItem
operation on the\n specified item. The item to be put is identified by an Item
\n subelement:
\n Item
- A map of attributes and their values. Each entry in\n this map consists of an attribute name and an attribute value. Attribute\n values must not be null; string and binary type attributes must have\n lengths greater than zero; and set type attributes must not be empty.\n Requests that contain empty values are rejected with a\n ValidationException
exception.
If you specify any attributes that are part of an index key, then the\n data types for those attributes must match those of the schema in the\n table's attribute definition.
\nA map of tables and requests against those tables that were not processed. The\n UnprocessedItems
value is in the same form as\n RequestItems
, so you can provide this value directly to a subsequent\n BatchWriteItem
operation. For more information, see\n RequestItems
in the Request Parameters section.
Each UnprocessedItems
entry consists of a table name or table ARN and, for that table,\n a list of operations to perform (DeleteRequest
or\n PutRequest
).
\n DeleteRequest
- Perform a DeleteItem
operation on the\n specified item. The item to be deleted is identified by a Key
\n subelement:
\n Key
- A map of primary key attribute values that uniquely\n identify the item. Each entry in this map consists of an attribute name\n and an attribute value.
\n PutRequest
- Perform a PutItem
operation on the\n specified item. The item to be put is identified by an Item
\n subelement:
\n Item
- A map of attributes and their values. Each entry in\n this map consists of an attribute name and an attribute value. Attribute\n values must not be null; string and binary type attributes must have\n lengths greater than zero; and set type attributes must not be empty.\n Requests that contain empty values will be rejected with a\n ValidationException
exception.
If you specify any attributes that are part of an index key, then the\n data types for those attributes must match those of the schema in the\n table's attribute definition.
\nIf there are no unprocessed items remaining, the response contains an empty\n UnprocessedItems
map.
A map of tables and requests against those tables that were not processed. The\n UnprocessedItems
value is in the same form as\n RequestItems
, so you can provide this value directly to a subsequent\n BatchWriteItem
operation. For more information, see\n RequestItems
in the Request Parameters section.
Each UnprocessedItems
entry consists of a table name or table ARN\n and, for that table, a list of operations to perform (DeleteRequest
or\n PutRequest
).
\n DeleteRequest
- Perform a DeleteItem
operation on the\n specified item. The item to be deleted is identified by a Key
\n subelement:
\n Key
- A map of primary key attribute values that uniquely\n identify the item. Each entry in this map consists of an attribute name\n and an attribute value.
\n PutRequest
- Perform a PutItem
operation on the\n specified item. The item to be put is identified by an Item
\n subelement:
\n Item
- A map of attributes and their values. Each entry in\n this map consists of an attribute name and an attribute value. Attribute\n values must not be null; string and binary type attributes must have\n lengths greater than zero; and set type attributes must not be empty.\n Requests that contain empty values will be rejected with a\n ValidationException
exception.
If you specify any attributes that are part of an index key, then the\n data types for those attributes must match those of the schema in the\n table's attribute definition.
\nIf there are no unprocessed items remaining, the response contains an empty\n UnprocessedItems
map.
The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this\n parameter.
", "smithy.api#required": {} } }, @@ -2141,7 +2141,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "The name of the table to create. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table to create. You can also provide the Amazon Resource Name (ARN) of the table in\n this parameter.
", "smithy.api#required": {} } }, @@ -2203,19 +2203,19 @@ "DeletionProtectionEnabled": { "target": "com.amazonaws.dynamodb#DeletionProtectionEnabled", "traits": { - "smithy.api#documentation": "Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table.
" + "smithy.api#documentation": "Indicates whether deletion protection is to be enabled (true) or disabled (false) on\n the table.
" } }, "ResourcePolicy": { "target": "com.amazonaws.dynamodb#ResourcePolicy", "traits": { - "smithy.api#documentation": "An Amazon Web Services resource-based policy document in JSON format that will be attached to the table.
\nWhen you attach a resource-based policy while creating a table, the policy application is strongly consistent.
\nThe maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that apply for resource-based policies, see Resource-based policy considerations.
\nYou need to specify the CreateTable
and PutResourcePolicy
IAM actions for authorizing a user to create a table with a resource-based policy.
An Amazon Web Services resource-based policy document in JSON format that will be\n attached to the table.
\nWhen you attach a resource-based policy while creating a table, the policy application\n is strongly consistent.
\nThe maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this\n limit. For a full list of all considerations that apply for resource-based policies, see\n Resource-based\n policy considerations.
\nYou need to specify the CreateTable
and\n PutResourcePolicy
\n IAM actions for authorizing a user to create a table with a\n resource-based policy.
Sets the maximum number of read and write units for the specified table in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits
, MaxWriteRequestUnits
, or both.
Sets the maximum number of read and write units for the specified table in on-demand\n capacity mode. If you use this parameter, you must specify\n MaxReadRequestUnits
, MaxWriteRequestUnits
, or both.
The name of the table from which to delete the item. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table from which to delete the item. You can also provide the\n Amazon Resource Name (ARN) of the table in this parameter.
", "smithy.api#required": {} } }, @@ -2658,7 +2658,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "Deletes the resource-based policy attached to the resource, which can be a table or stream.
\n\n DeleteResourcePolicy
is an idempotent operation; running it multiple times on the same resource doesn't result in an error response, unless you specify an ExpectedRevisionId
, which will then return a PolicyNotFoundException
.
To make sure that you don't inadvertently lock yourself out of your own resources, the root principal in your Amazon Web Services account can perform DeleteResourcePolicy
requests, even if your resource-based policy explicitly denies the root principal's access.\n
\n DeleteResourcePolicy
is an asynchronous operation. If you issue a GetResourcePolicy
request immediately after running the DeleteResourcePolicy
request, DynamoDB might still return the deleted policy. This is because the policy for your resource might not have been deleted yet. Wait for a few seconds, and then try the GetResourcePolicy
request again.
Deletes the resource-based policy attached to the resource, which can be a table or\n stream.
\n\n DeleteResourcePolicy
is an idempotent operation; running it multiple\n times on the same resource doesn't result in an error response,\n unless you specify an ExpectedRevisionId
, which will then return a\n PolicyNotFoundException
.
To make sure that you don't inadvertently lock yourself out of your own resources,\n the root principal in your Amazon Web Services account can perform\n DeleteResourcePolicy
requests, even if your resource-based policy\n explicitly denies the root principal's access.
\n DeleteResourcePolicy
is an asynchronous operation. If you issue a\n GetResourcePolicy
request immediately after running the\n DeleteResourcePolicy
request, DynamoDB might still return\n the deleted policy. This is because the policy for your resource might not have been\n deleted yet. Wait for a few seconds, and then try the GetResourcePolicy
\n request again.
The Amazon Resource Name (ARN) of the DynamoDB resource from which the policy will be removed. The resources you can specify include tables and streams. If you remove the policy of a table, it will also remove the permissions for the table's indexes defined in that policy document. This is because index permissions are defined in the table's policy.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the DynamoDB resource from which the policy will be\n removed. The resources you can specify include tables and streams. If you remove the\n policy of a table, it will also remove the permissions for the table's indexes defined\n in that policy document. This is because index permissions are defined in the table's\n policy.
", "smithy.api#required": {} } }, "ExpectedRevisionId": { "target": "com.amazonaws.dynamodb#PolicyRevisionId", "traits": { - "smithy.api#documentation": "A string value that you can use to conditionally delete your policy. When you provide an expected revision ID, if the revision ID of the existing policy on the resource doesn't match or if there's no policy attached to the resource, the request will fail and return a PolicyNotFoundException
.
A string value that you can use to conditionally delete your policy. When you provide\n an expected revision ID, if the revision ID of the existing policy on the resource\n doesn't match or if there's no policy attached to the resource, the request will fail\n and return a PolicyNotFoundException
.
A unique string that represents the revision ID of the policy. If you're comparing revision IDs, make sure to always use string comparison logic.
\nThis value will be empty if you make a request against a resource without a policy.
" + "smithy.api#documentation": "A unique string that represents the revision ID of the policy. If you're comparing revision IDs, make sure to always use string comparison logic.
\nThis value will be empty if you make a request against a resource without a\n policy.
" } } }, @@ -2725,7 +2725,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "The DeleteTable
operation deletes a table and all of its items. After a\n DeleteTable
request, the specified table is in the\n DELETING
state until DynamoDB completes the deletion. If the table is\n in the ACTIVE
state, you can delete it. If a table is in\n CREATING
or UPDATING
states, then DynamoDB returns a\n ResourceInUseException
. If the specified table does not exist, DynamoDB\n returns a ResourceNotFoundException
. If table is already in the\n DELETING
state, no error is returned.
For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n
\nDynamoDB might continue to accept data read and write operations, such as\n GetItem
and PutItem
, on a table in the\n DELETING
state until the table deletion is complete.
When you delete a table, any indexes on that table are also deleted.
\nIf you have DynamoDB Streams enabled on the table, then the corresponding stream on\n that table goes into the DISABLED
state, and the stream is automatically\n deleted after 24 hours.
Use the DescribeTable
action to check the status of the table.
The DeleteTable
operation deletes a table and all of its items. After a\n DeleteTable
request, the specified table is in the\n DELETING
state until DynamoDB completes the deletion. If the table is\n in the ACTIVE
state, you can delete it. If a table is in\n CREATING
or UPDATING
states, then DynamoDB returns a\n ResourceInUseException
. If the specified table does not exist, DynamoDB\n returns a ResourceNotFoundException
. If table is already in the\n DELETING
state, no error is returned.
For global tables, this operation only applies to\n global tables using Version 2019.11.21 (Current version).
\nDynamoDB might continue to accept data read and write operations, such as\n GetItem
and PutItem
, on a table in the\n DELETING
state until the table deletion is complete. For the full\n list of table states, see TableStatus.
When you delete a table, any indexes on that table are also deleted.
\nIf you have DynamoDB Streams enabled on the table, then the corresponding stream on\n that table goes into the DISABLED
state, and the stream is automatically\n deleted after 24 hours.
Use the DescribeTable
action to check the status of the table.
The name of the table to delete. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table to delete. You can also provide the Amazon Resource Name (ARN) of the table in\n this parameter.
", "smithy.api#required": {} } } @@ -2920,7 +2920,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "The name of the table to describe. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table to describe. You can also provide the Amazon Resource Name (ARN) of the table in\n this parameter.
", "smithy.api#required": {} } }, @@ -2988,7 +2988,7 @@ "target": "com.amazonaws.dynamodb#DescribeEndpointsResponse" }, "traits": { - "smithy.api#documentation": "Returns the regional endpoint information. For more information \n on policy permissions, please see Internetwork traffic privacy.
" + "smithy.api#documentation": "Returns the regional endpoint information. For more information on policy permissions,\n please see Internetwork traffic privacy.
" } }, "com.amazonaws.dynamodb#DescribeEndpointsRequest": { @@ -3260,7 +3260,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "The name of the table being described. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table being described. You can also provide the Amazon Resource Name (ARN) of the table\n in this parameter.
", "smithy.api#required": {} } } @@ -3388,7 +3388,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "Returns information about the table, including the current status of the table, when\n it was created, the primary key schema, and any indexes on the table.
\nFor global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n
\nIf you issue a DescribeTable
request immediately after a\n CreateTable
request, DynamoDB might return a\n ResourceNotFoundException
. This is because\n DescribeTable
uses an eventually consistent query, and the metadata\n for your table might not be available at that moment. Wait for a few seconds, and\n then try the DescribeTable
request again.
Returns information about the table, including the current status of the table, when\n it was created, the primary key schema, and any indexes on the table.
\nFor global tables, this operation only applies to global tables using Version\n 2019.11.21 (Current version).
\nIf you issue a DescribeTable
request immediately after a\n CreateTable
request, DynamoDB might return a\n ResourceNotFoundException
. This is because\n DescribeTable
uses an eventually consistent query, and the metadata\n for your table might not be available at that moment. Wait for a few seconds, and\n then try the DescribeTable
request again.
The name of the table to describe. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table to describe. You can also provide the Amazon Resource Name (ARN) of the table in\n this parameter.
", "smithy.api#required": {} } } @@ -3473,7 +3473,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes auto scaling settings across replicas of the global table at once.
\nFor global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).
\nDescribes auto scaling settings across replicas of the global table at once.
\nFor global tables, this operation only applies to global tables using Version\n 2019.11.21 (Current version).
\nThe name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this\n parameter.
", "smithy.api#required": {} } } @@ -3537,7 +3537,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "The name of the table to be described. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table to be described. You can also provide the Amazon Resource Name (ARN) of the table\n in this parameter.
", "smithy.api#required": {} } } @@ -3630,7 +3630,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "Stops replication from the DynamoDB table to the Kinesis data stream. This is done\n without deleting either of the resources.
" + "smithy.api#documentation": "Stops replication from the DynamoDB table to the Kinesis data stream. This\n is done without deleting either of the resources.
" } }, "com.amazonaws.dynamodb#DoubleObject": { @@ -5057,7 +5057,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation allows you to perform reads and singleton writes on data stored in\n DynamoDB, using PartiQL.
\nFor PartiQL reads (SELECT
statement), if the total number of processed\n items exceeds the maximum dataset size limit of 1 MB, the read stops and results are\n returned to the user as a LastEvaluatedKey
value to continue the read in a\n subsequent operation. If the filter criteria in WHERE
clause does not match\n any data, the read will return an empty result set.
A single SELECT
statement response can return up to the maximum number of\n items (if using the Limit parameter) or a maximum of 1 MB of data (and then apply any\n filtering to the results using WHERE
clause). If\n LastEvaluatedKey
is present in the response, you need to paginate the\n result set. If NextToken
is present, you need to paginate the result set and include \n NextToken
.
This operation allows you to perform reads and singleton writes on data stored in\n DynamoDB, using PartiQL.
\nFor PartiQL reads (SELECT
statement), if the total number of processed\n items exceeds the maximum dataset size limit of 1 MB, the read stops and results are\n returned to the user as a LastEvaluatedKey
value to continue the read in a\n subsequent operation. If the filter criteria in WHERE
clause does not match\n any data, the read will return an empty result set.
A single SELECT
statement response can return up to the maximum number of\n items (if using the Limit parameter) or a maximum of 1 MB of data (and then apply any\n filtering to the results using WHERE
clause). If\n LastEvaluatedKey
is present in the response, you need to paginate the\n result set. If NextToken
is present, you need to paginate the result set\n and include NextToken
.
An optional parameter that returns the item attributes for an\n ExecuteStatement
operation that failed a condition check.
There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.
" + "smithy.api#documentation": "An optional parameter that returns the item attributes for an\n ExecuteStatement
operation that failed a condition check.
There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.
" } } }, @@ -5580,7 +5580,7 @@ "S3BucketOwner": { "target": "com.amazonaws.dynamodb#S3BucketOwner", "traits": { - "smithy.api#documentation": "The ID of the Amazon Web Services account that owns the bucket the export will be\n stored in.
\nS3BucketOwner is a required parameter when exporting to a S3 bucket in another account.
\nThe ID of the Amazon Web Services account that owns the bucket the export will be\n stored in.
\nS3BucketOwner is a required parameter when exporting to a S3 bucket in another\n account.
\nChoice of whether to execute as a full export or incremental export. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. The default value is FULL_EXPORT. If INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must also be used.
" + "smithy.api#documentation": "Choice of whether to execute as a full export or incremental export. Valid values are\n FULL_EXPORT or INCREMENTAL_EXPORT. The default value is FULL_EXPORT. If\n INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must also be\n used.
" } }, "IncrementalExportSpecification": { @@ -5839,7 +5839,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "The name of the table containing the requested item. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table containing the requested item. You can also provide the\n Amazon Resource Name (ARN) of the table in this parameter.
", "smithy.api#required": {} } }, @@ -5895,7 +5895,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "The capacity units consumed by the GetItem
operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity
is only\n returned if the ReturnConsumedCapacity
parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer\n Guide.
The capacity units consumed by the GetItem
operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity
is only\n returned if the ReturnConsumedCapacity
parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon\n DynamoDB Developer Guide.
Returns the resource-based policy document attached to the resource, which can be a table or stream, in JSON format.
\n\n GetResourcePolicy
follows an \n eventually consistent\n model. The following list describes the outcomes when you issue the GetResourcePolicy
request immediately after issuing another request:
If you issue a GetResourcePolicy
request immediately after a PutResourcePolicy
request, DynamoDB might return a PolicyNotFoundException
.
If you issue a GetResourcePolicy
request immediately after a DeleteResourcePolicy
request, DynamoDB might return the policy that was present before the deletion request.
If you issue a GetResourcePolicy
request immediately after a CreateTable
request, which includes a resource-based policy, DynamoDB might return a ResourceNotFoundException
or a PolicyNotFoundException
.
Because GetResourcePolicy
uses an eventually consistent query, the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then retry the GetResourcePolicy
request.
After a GetResourcePolicy
request returns a policy created using the PutResourcePolicy
request, the policy will be applied in the authorization of requests to the resource. Because this process is eventually consistent, it will take some time to apply the policy to all requests to a resource. Policies that you attach while creating a table using the CreateTable
request will always be applied to all requests for that table.
Returns the resource-based policy document attached to the resource, which can be a\n table or stream, in JSON format.
\n\n GetResourcePolicy
follows an \n eventually consistent\n model. The following list\n describes the outcomes when you issue the GetResourcePolicy
request\n immediately after issuing another request:
If you issue a GetResourcePolicy
request immediately after a\n PutResourcePolicy
request, DynamoDB might return a\n PolicyNotFoundException
.
If you issue a GetResourcePolicy
request immediately after a\n DeleteResourcePolicy
request, DynamoDB might return\n the policy that was present before the deletion request.
If you issue a GetResourcePolicy
request immediately after a\n CreateTable
request, which includes a resource-based policy,\n DynamoDB might return a ResourceNotFoundException
or\n a PolicyNotFoundException
.
Because GetResourcePolicy
uses an eventually\n consistent query, the metadata for your policy or table might not be\n available at that moment. Wait for a few seconds, and then retry the\n GetResourcePolicy
request.
After a GetResourcePolicy
request returns a policy created using the\n PutResourcePolicy
request, the policy will be applied in the\n authorization of requests to the resource. Because this process is eventually\n consistent, it will take some time to apply the policy to all requests to a resource.\n Policies that you attach while creating a table using the CreateTable
\n request will always be applied to all requests for that table.
The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy is attached. The resources you can specify include tables and streams.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy is attached. The\n resources you can specify include tables and streams.
", "smithy.api#required": {} } } @@ -5954,7 +5954,7 @@ "Policy": { "target": "com.amazonaws.dynamodb#ResourcePolicy", "traits": { - "smithy.api#documentation": "The resource-based policy document attached to the resource, which can be a table or stream, in JSON format.
" + "smithy.api#documentation": "The resource-based policy document attached to the resource, which can be a table or\n stream, in JSON format.
" } }, "RevisionId": { @@ -7214,7 +7214,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "The name of the DynamoDB table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the DynamoDB table. You can also provide the Amazon Resource Name (ARN) of the\n table in this parameter.
", "smithy.api#required": {} } }, @@ -7306,7 +7306,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "List DynamoDB backups that are associated with an Amazon Web Services account and weren't made with Amazon Web Services Backup. \n To list these backups for a given table, specify TableName
. ListBackups
returns a\n paginated list of results with at most 1 MB worth of items in a page. You can also\n specify a maximum number of entries to be returned in a page.
In the request, start time is inclusive, but end time is exclusive. Note that these\n boundaries are for the time at which the original backup was requested.
\nYou can call ListBackups
a maximum of five times per second.
If you want to retrieve the complete list of backups made with Amazon Web Services Backup, use the \n Amazon Web Services Backup list API.\n
" + "smithy.api#documentation": "List DynamoDB backups that are associated with an Amazon Web Services account and\n weren't made with Amazon Web Services Backup. To list these backups for a given table,\n specify TableName
. ListBackups
returns a paginated list of\n results with at most 1 MB worth of items in a page. You can also specify a maximum\n number of entries to be returned in a page.
In the request, start time is inclusive, but end time is exclusive. Note that these\n boundaries are for the time at which the original backup was requested.
\nYou can call ListBackups
a maximum of five times per second.
If you want to retrieve the complete list of backups made with Amazon Web Services\n Backup, use the Amazon Web Services Backup\n list API.\n
" } }, "com.amazonaws.dynamodb#ListBackupsInput": { @@ -7315,7 +7315,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "Lists the backups from the table specified in TableName
. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
Lists the backups from the table specified in TableName
. You can also\n provide the Amazon Resource Name (ARN) of the table in this parameter.
The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
" + "smithy.api#documentation": "The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this\n parameter.
" } }, "NextToken": { @@ -8490,7 +8490,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "Creates a new item, or replaces an old item with a new item. If an item that has the\n same primary key as the new item already exists in the specified table, the new item\n completely replaces the existing item. You can perform a conditional put operation (add\n a new item if one with the specified primary key doesn't exist), or replace an existing\n item if it has certain attribute values. You can return the item's attribute values in\n the same operation, using the ReturnValues
parameter.
When you add an item, the primary key attributes are the only required attributes.\n
\nEmpty String and Binary attribute values are allowed. Attribute values of type String\n and Binary must have a length greater than zero if the attribute is used as a key\n attribute for a table or index. Set type attributes cannot be empty.
\nInvalid Requests with empty values will be rejected with a\n ValidationException
exception.
To prevent a new item from replacing an existing item, use a conditional\n expression that contains the attribute_not_exists
function with the\n name of the attribute being used as the partition key for the table. Since every\n record must contain that attribute, the attribute_not_exists
function\n will only succeed if no matching item exists.
For more information about PutItem
, see Working with\n Items in the Amazon DynamoDB Developer Guide.
Creates a new item, or replaces an old item with a new item. If an item that has the\n same primary key as the new item already exists in the specified table, the new item\n completely replaces the existing item. You can perform a conditional put operation (add\n a new item if one with the specified primary key doesn't exist), or replace an existing\n item if it has certain attribute values. You can return the item's attribute values in\n the same operation, using the ReturnValues
parameter.
When you add an item, the primary key attributes are the only required attributes.
\nEmpty String and Binary attribute values are allowed. Attribute values of type String\n and Binary must have a length greater than zero if the attribute is used as a key\n attribute for a table or index. Set type attributes cannot be empty.
\nInvalid Requests with empty values will be rejected with a\n ValidationException
exception.
To prevent a new item from replacing an existing item, use a conditional\n expression that contains the attribute_not_exists
function with the\n name of the attribute being used as the partition key for the table. Since every\n record must contain that attribute, the attribute_not_exists
function\n will only succeed if no matching item exists.
For more information about PutItem
, see Working with\n Items in the Amazon DynamoDB Developer Guide.
The name of the table to contain the item. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table to contain the item. You can also provide the Amazon Resource Name (ARN) of the\n table in this parameter.
", "smithy.api#required": {} } }, @@ -8615,7 +8615,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "The capacity units consumed by the PutItem
operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity
is only\n returned if the ReturnConsumedCapacity
parameter was specified. For more\n information, see Capacity unity consumption for write operations in the Amazon DynamoDB Developer\n Guide.
The capacity units consumed by the PutItem
operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity
is only\n returned if the ReturnConsumedCapacity
parameter was specified. For more\n information, see Capacity unity consumption for write operations in the Amazon\n DynamoDB Developer Guide.
Attaches a resource-based policy document to the resource, which can be a table or stream. When you attach a resource-based policy using this API, the policy application is \n eventually consistent\n .
\n\n PutResourcePolicy
is an idempotent operation; running it multiple times on the same resource using the same policy document will return the same revision ID. If you specify an ExpectedRevisionId
that doesn't match the current policy's RevisionId
, the PolicyNotFoundException
will be returned.
\n PutResourcePolicy
is an asynchronous operation. If you issue a GetResourcePolicy
request immediately after a PutResourcePolicy
request, DynamoDB might return your previous policy, if there was one, or return the PolicyNotFoundException
. This is because GetResourcePolicy
uses an eventually consistent query, and the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then try the GetResourcePolicy
request again.
Attaches a resource-based policy document to the resource, which can be a table or\n stream. When you attach a resource-based policy using this API, the policy application\n is \n eventually consistent\n .
\n\n PutResourcePolicy
is an idempotent operation; running it multiple times\n on the same resource using the same policy document will return the same revision ID. If\n you specify an ExpectedRevisionId
that doesn't match the current policy's\n RevisionId
, the PolicyNotFoundException
will be\n returned.
\n PutResourcePolicy
is an asynchronous operation. If you issue a\n GetResourcePolicy
request immediately after a\n PutResourcePolicy
request, DynamoDB might return your\n previous policy, if there was one, or return the\n PolicyNotFoundException
. This is because\n GetResourcePolicy
uses an eventually consistent query, and the\n metadata for your policy or table might not be available at that moment. Wait for a\n few seconds, and then try the GetResourcePolicy
request again.
The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy will be attached. The resources you can specify include tables and streams.
\nYou can control index permissions using the base table's policy. To specify the same permission level for your table and its indexes, you can provide both the table and index Amazon Resource Name (ARN)s in the Resource
field of a given Statement
in your policy document. Alternatively, to specify different permissions for your table, indexes, or both, you can define multiple Statement
fields in your policy document.
The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy will be attached.\n The resources you can specify include tables and streams.
\nYou can control index permissions using the base table's policy. To specify the same permission level for your table and its indexes, you can provide both the table and index Amazon Resource Name (ARN)s in the Resource
field of a given Statement
in your policy document. Alternatively, to specify different permissions for your table, indexes, or both, you can define multiple Statement
fields in your policy document.
An Amazon Web Services resource-based policy document in JSON format.
\nThe maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit.
\nWithin a resource-based policy, if the action for a DynamoDB service-linked role (SLR) to replicate data for a global table is denied, adding or deleting a replica will fail with an error.
\nFor a full list of all considerations that apply while attaching a resource-based policy, see Resource-based policy considerations.
", + "smithy.api#documentation": "An Amazon Web Services resource-based policy document in JSON format.
\nThe maximum size supported for a resource-based policy document is 20 KB.\n DynamoDB counts whitespaces when calculating the size of a policy\n against this limit.
\nWithin a resource-based policy, if the action for a DynamoDB\n service-linked role (SLR) to replicate data for a global table is denied, adding\n or deleting a replica will fail with an error.
\nFor a full list of all considerations that apply while attaching a resource-based\n policy, see Resource-based\n policy considerations.
", "smithy.api#required": {} } }, "ExpectedRevisionId": { "target": "com.amazonaws.dynamodb#PolicyRevisionId", "traits": { - "smithy.api#documentation": "A string value that you can use to conditionally update your policy. You can provide the revision ID of your existing policy to make mutating requests against that policy.
\nWhen you provide an expected revision ID, if the revision ID of the existing policy on the resource doesn't match or if there's no policy attached to the resource, your request will be rejected with a PolicyNotFoundException
.
To conditionally attach a policy when no policy exists for the resource, specify NO_POLICY
for the revision ID.
A string value that you can use to conditionally update your policy. You can provide\n the revision ID of your existing policy to make mutating requests against that\n policy.
\nWhen you provide an expected revision ID, if the revision ID of the existing\n policy on the resource doesn't match or if there's no policy attached to the\n resource, your request will be rejected with a\n PolicyNotFoundException
.
To conditionally attach a policy when no policy exists for the resource, specify\n NO_POLICY
for the revision ID.
Set this parameter to true
to confirm that you want to remove your permissions to change the policy of this resource in the future.
Set this parameter to true
to confirm that you want to remove your\n permissions to change the policy of this resource in the future.
The name of the table containing the requested items. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table containing the requested items. You can also provide the\n Amazon Resource Name (ARN) of the table in this parameter.
", "smithy.api#required": {} } }, @@ -8818,7 +8818,7 @@ "Select": { "target": "com.amazonaws.dynamodb#Select", "traits": { - "smithy.api#documentation": "The attributes to be returned in the result. You can retrieve all item attributes,\n specific item attributes, the count of matching items, or in the case of an index, some\n or all of the attributes projected into the index.
\n\n ALL_ATTRIBUTES
- Returns all of the item attributes from the\n specified table or index. If you query a local secondary index, then for each\n matching item in the index, DynamoDB fetches the entire item from the parent\n table. If the index is configured to project all item attributes, then all of\n the data can be obtained from the local secondary index, and no fetching is\n required.
\n ALL_PROJECTED_ATTRIBUTES
- Allowed only when querying an index.\n Retrieves all attributes that have been projected into the index. If the index\n is configured to project all attributes, this return value is equivalent to\n specifying ALL_ATTRIBUTES
.
\n COUNT
- Returns the number of matching items, rather than the\n matching items themselves. Note that this uses the same quantity of read capacity units \n as getting the items, and is subject to the same item size calculations.
\n SPECIFIC_ATTRIBUTES
- Returns only the attributes listed in\n ProjectionExpression
. This return value is equivalent to\n specifying ProjectionExpression
without specifying any value for\n Select
.
If you query or scan a local secondary index and request only attributes that\n are projected into that index, the operation will read only the index and not\n the table. If any of the requested attributes are not projected into the local\n secondary index, DynamoDB fetches each of these attributes from the parent\n table. This extra fetching incurs additional throughput cost and latency.
\nIf you query or scan a global secondary index, you can only request attributes\n that are projected into the index. Global secondary index queries cannot fetch\n attributes from the parent table.
\nIf neither Select
nor ProjectionExpression
are specified,\n DynamoDB defaults to ALL_ATTRIBUTES
when accessing a table, and\n ALL_PROJECTED_ATTRIBUTES
when accessing an index. You cannot use both\n Select
and ProjectionExpression
together in a single\n request, unless the value for Select
is SPECIFIC_ATTRIBUTES
.\n (This usage is equivalent to specifying ProjectionExpression
without any\n value for Select
.)
If you use the ProjectionExpression
parameter, then the value for\n Select
can only be SPECIFIC_ATTRIBUTES
. Any other\n value for Select
will return an error.
The attributes to be returned in the result. You can retrieve all item attributes,\n specific item attributes, the count of matching items, or in the case of an index, some\n or all of the attributes projected into the index.
\n\n ALL_ATTRIBUTES
- Returns all of the item attributes from the\n specified table or index. If you query a local secondary index, then for each\n matching item in the index, DynamoDB fetches the entire item from the parent\n table. If the index is configured to project all item attributes, then all of\n the data can be obtained from the local secondary index, and no fetching is\n required.
\n ALL_PROJECTED_ATTRIBUTES
- Allowed only when querying an index.\n Retrieves all attributes that have been projected into the index. If the index\n is configured to project all attributes, this return value is equivalent to\n specifying ALL_ATTRIBUTES
.
\n COUNT
- Returns the number of matching items, rather than the\n matching items themselves. Note that this uses the same quantity of read\n capacity units as getting the items, and is subject to the same item size\n calculations.
\n SPECIFIC_ATTRIBUTES
- Returns only the attributes listed in\n ProjectionExpression
. This return value is equivalent to\n specifying ProjectionExpression
without specifying any value for\n Select
.
If you query or scan a local secondary index and request only attributes that\n are projected into that index, the operation will read only the index and not\n the table. If any of the requested attributes are not projected into the local\n secondary index, DynamoDB fetches each of these attributes from the parent\n table. This extra fetching incurs additional throughput cost and latency.
\nIf you query or scan a global secondary index, you can only request attributes\n that are projected into the index. Global secondary index queries cannot fetch\n attributes from the parent table.
\nIf neither Select
nor ProjectionExpression
are specified,\n DynamoDB defaults to ALL_ATTRIBUTES
when accessing a table, and\n ALL_PROJECTED_ATTRIBUTES
when accessing an index. You cannot use both\n Select
and ProjectionExpression
together in a single\n request, unless the value for Select
is SPECIFIC_ATTRIBUTES
.\n (This usage is equivalent to specifying ProjectionExpression
without any\n value for Select
.)
If you use the ProjectionExpression
parameter, then the value for\n Select
can only be SPECIFIC_ATTRIBUTES
. Any other\n value for Select
will return an error.
A string that contains conditions that DynamoDB applies after the Query
\n operation, but before the data is returned to you. Items that do not satisfy the\n FilterExpression
criteria are not returned.
A FilterExpression
does not allow key attributes. You cannot define a\n filter expression based on a partition key or a sort key.
A FilterExpression
is applied after the items have already been read;\n the process of filtering does not consume any additional read capacity units.
For more information, see Filter Expressions in the Amazon DynamoDB Developer\n Guide.
" + "smithy.api#documentation": "A string that contains conditions that DynamoDB applies after the Query
\n operation, but before the data is returned to you. Items that do not satisfy the\n FilterExpression
criteria are not returned.
A FilterExpression
does not allow key attributes. You cannot define a\n filter expression based on a partition key or a sort key.
A FilterExpression
is applied after the items have already been read;\n the process of filtering does not consume any additional read capacity units.
For more information, see Filter\n Expressions in the Amazon DynamoDB Developer\n Guide.
" } }, "KeyConditionExpression": { @@ -8928,7 +8928,7 @@ "target": "com.amazonaws.dynamodb#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The number of items evaluated, before any QueryFilter
is applied. A high\n ScannedCount
value with few, or no, Count
results\n indicates an inefficient Query
operation. For more information, see Count and\n ScannedCount in the Amazon DynamoDB Developer\n Guide.
If you did not use a filter in the request, then ScannedCount
is the same\n as Count
.
The number of items evaluated, before any QueryFilter
is applied. A high\n ScannedCount
value with few, or no, Count
results\n indicates an inefficient Query
operation. For more information, see Count and\n ScannedCount in the Amazon DynamoDB Developer\n Guide.
If you did not use a filter in the request, then ScannedCount
is the same\n as Count
.
The capacity units consumed by the Query
operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity
is only\n returned if the ReturnConsumedCapacity
parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer\n Guide.
The capacity units consumed by the Query
operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity
is only\n returned if the ReturnConsumedCapacity
parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon\n DynamoDB Developer Guide.
Restores the specified table to the specified point in time within\n EarliestRestorableDateTime
and LatestRestorableDateTime
.\n You can restore your table to any point in time during the last 35 days. Any number of\n users can execute up to 50 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to\n the state based on the selected date and time (day:hour:minute:second) to a new table.
\nAlong with data, the following are also included on the new restored table using\n point in time recovery:
\nGlobal secondary indexes (GSIs)
\nLocal secondary indexes (LSIs)
\nProvisioned read and write capacity
\nEncryption settings
\nAll these settings come from the current settings of the source table at\n the time of restore.
\nYou must manually set up the following on the restored table:
\nAuto scaling policies
\nIAM policies
\nAmazon CloudWatch metrics and alarms
\nTags
\nStream settings
\nTime to Live (TTL) settings
\nPoint in time recovery settings
\nRestores the specified table to the specified point in time within\n EarliestRestorableDateTime
and LatestRestorableDateTime
.\n You can restore your table to any point in time during the last 35 days. Any number of\n users can execute up to 50 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to\n the state based on the selected date and time (day:hour:minute:second) to a new table.
\nAlong with data, the following are also included on the new restored table using point\n in time recovery:
\nGlobal secondary indexes (GSIs)
\nLocal secondary indexes (LSIs)
\nProvisioned read and write capacity
\nEncryption settings
\nAll these settings come from the current settings of the source table at\n the time of restore.
\nYou must manually set up the following on the restored table:
\nAuto scaling policies
\nIAM policies
\nAmazon CloudWatch metrics and alarms
\nTags
\nStream settings
\nTime to Live (TTL) settings
\nPoint in time recovery settings
\nThe Scan
operation returns one or more items and item attributes by\n accessing every item in a table or a secondary index. To have DynamoDB return fewer\n items, you can provide a FilterExpression
operation.
If the total size of scanned items exceeds the maximum dataset size limit of 1 MB,\n the scan completes and results are returned to the user. The LastEvaluatedKey
\n value is also returned and the requestor can use the LastEvaluatedKey
to continue \n the scan in a subsequent operation. Each scan response also includes number of items that were \n scanned (ScannedCount) as part of the request. If using a FilterExpression
, a scan result \n can result in no items meeting the criteria and the Count
will result in zero. If \n you did not use a FilterExpression
in the scan request, then Count
is \n the same as ScannedCount
.
\n Count
and ScannedCount
only return the count of items specific to a \n single scan request and, unless the table is less than 1 MB, do not represent the total number \n of items in the table.\n
A single Scan
operation first reads up to the maximum number of items set (if\n using the Limit
parameter) or a maximum of 1 MB of data and then applies any\n filtering to the results if a FilterExpression
is provided. If\n LastEvaluatedKey
is present in the response, pagination is required to complete the\n full table scan. For more information, see Paginating the\n Results in the Amazon DynamoDB Developer Guide.
\n Scan
operations proceed sequentially; however, for faster performance on\n a large table or secondary index, applications can request a parallel Scan
\n operation by providing the Segment
and TotalSegments
\n parameters. For more information, see Parallel\n Scan in the Amazon DynamoDB Developer Guide.
By default, a Scan
uses eventually consistent reads when accessing the items in a table. \n Therefore, the results from an eventually consistent Scan
may not include the latest item \n changes at the time the scan iterates through each item in the table. If you require a strongly consistent \n read of each item as the scan iterates through the items in the table, you can set the ConsistentRead
\n parameter to true. Strong consistency only relates to the consistency of the read at the item level.
\n DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead
\n parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan \n see a consistent snapshot of the table when the scan operation was requested.\n
The Scan
operation returns one or more items and item attributes by\n accessing every item in a table or a secondary index. To have DynamoDB return fewer\n items, you can provide a FilterExpression
operation.
If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the\n scan completes and results are returned to the user. The LastEvaluatedKey
\n value is also returned and the requestor can use the LastEvaluatedKey
to\n continue the scan in a subsequent operation. Each scan response also includes number of\n items that were scanned (ScannedCount) as part of the request. If using a\n FilterExpression
, a scan result can result in no items meeting the\n criteria and the Count
will result in zero. If you did not use a\n FilterExpression
in the scan request, then Count
is the\n same as ScannedCount
.
\n Count
and ScannedCount
only return the count of items\n specific to a single scan request and, unless the table is less than 1 MB, do not\n represent the total number of items in the table.
A single Scan
operation first reads up to the maximum number of items set\n (if using the Limit
parameter) or a maximum of 1 MB of data and then\n applies any filtering to the results if a FilterExpression
is provided. If\n LastEvaluatedKey
is present in the response, pagination is required to\n complete the full table scan. For more information, see Paginating the\n Results in the Amazon DynamoDB Developer Guide.
\n Scan
operations proceed sequentially; however, for faster performance on\n a large table or secondary index, applications can request a parallel Scan
\n operation by providing the Segment
and TotalSegments
\n parameters. For more information, see Parallel\n Scan in the Amazon DynamoDB Developer Guide.
By default, a Scan
uses eventually consistent reads when accessing the\n items in a table. Therefore, the results from an eventually consistent Scan
\n may not include the latest item changes at the time the scan iterates through each item\n in the table. If you require a strongly consistent read of each item as the scan\n iterates through the items in the table, you can set the ConsistentRead
\n parameter to true. Strong consistency only relates to the consistency of the read at the\n item level.
DynamoDB does not provide snapshot isolation for a scan operation when the\n ConsistentRead
parameter is set to true. Thus, a DynamoDB scan\n operation does not guarantee that all reads in a scan see a consistent snapshot of\n the table when the scan operation was requested.
The name of the table containing the requested items or if you provide\n IndexName
, the name of the table to which that index belongs.
You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table containing the requested items or if you provide\n IndexName
, the name of the table to which that index belongs.
You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", "smithy.api#required": {} } }, @@ -10337,7 +10337,7 @@ "Select": { "target": "com.amazonaws.dynamodb#Select", "traits": { - "smithy.api#documentation": "The attributes to be returned in the result. You can retrieve all item attributes,\n specific item attributes, the count of matching items, or in the case of an index, some\n or all of the attributes projected into the index.
\n\n ALL_ATTRIBUTES
- Returns all of the item attributes from the\n specified table or index. If you query a local secondary index, then for each\n matching item in the index, DynamoDB fetches the entire item from the parent\n table. If the index is configured to project all item attributes, then all of\n the data can be obtained from the local secondary index, and no fetching is\n required.
\n ALL_PROJECTED_ATTRIBUTES
- Allowed only when querying an index.\n Retrieves all attributes that have been projected into the index. If the index\n is configured to project all attributes, this return value is equivalent to\n specifying ALL_ATTRIBUTES
.
\n COUNT
- Returns the number of matching items, rather than the\n matching items themselves. Note that this uses the same quantity of read capacity units \n as getting the items, and is subject to the same item size calculations.
\n SPECIFIC_ATTRIBUTES
- Returns only the attributes listed in\n ProjectionExpression
. This return value is equivalent to\n specifying ProjectionExpression
without specifying any value for\n Select
.
If you query or scan a local secondary index and request only attributes that\n are projected into that index, the operation reads only the index and not the\n table. If any of the requested attributes are not projected into the local\n secondary index, DynamoDB fetches each of these attributes from the parent\n table. This extra fetching incurs additional throughput cost and latency.
\nIf you query or scan a global secondary index, you can only request attributes\n that are projected into the index. Global secondary index queries cannot fetch\n attributes from the parent table.
\nIf neither Select
nor ProjectionExpression
are specified,\n DynamoDB defaults to ALL_ATTRIBUTES
when accessing a table, and\n ALL_PROJECTED_ATTRIBUTES
when accessing an index. You cannot use both\n Select
and ProjectionExpression
together in a single\n request, unless the value for Select
is SPECIFIC_ATTRIBUTES
.\n (This usage is equivalent to specifying ProjectionExpression
without any\n value for Select
.)
If you use the ProjectionExpression
parameter, then the value for\n Select
can only be SPECIFIC_ATTRIBUTES
. Any other\n value for Select
will return an error.
The attributes to be returned in the result. You can retrieve all item attributes,\n specific item attributes, the count of matching items, or in the case of an index, some\n or all of the attributes projected into the index.
\n\n ALL_ATTRIBUTES
- Returns all of the item attributes from the\n specified table or index. If you query a local secondary index, then for each\n matching item in the index, DynamoDB fetches the entire item from the parent\n table. If the index is configured to project all item attributes, then all of\n the data can be obtained from the local secondary index, and no fetching is\n required.
\n ALL_PROJECTED_ATTRIBUTES
- Allowed only when querying an index.\n Retrieves all attributes that have been projected into the index. If the index\n is configured to project all attributes, this return value is equivalent to\n specifying ALL_ATTRIBUTES
.
\n COUNT
- Returns the number of matching items, rather than the\n matching items themselves. Note that this uses the same quantity of read\n capacity units as getting the items, and is subject to the same item size\n calculations.
\n SPECIFIC_ATTRIBUTES
- Returns only the attributes listed in\n ProjectionExpression
. This return value is equivalent to\n specifying ProjectionExpression
without specifying any value for\n Select
.
If you query or scan a local secondary index and request only attributes that\n are projected into that index, the operation reads only the index and not the\n table. If any of the requested attributes are not projected into the local\n secondary index, DynamoDB fetches each of these attributes from the parent\n table. This extra fetching incurs additional throughput cost and latency.
\nIf you query or scan a global secondary index, you can only request attributes\n that are projected into the index. Global secondary index queries cannot fetch\n attributes from the parent table.
\nIf neither Select
nor ProjectionExpression
are specified,\n DynamoDB defaults to ALL_ATTRIBUTES
when accessing a table, and\n ALL_PROJECTED_ATTRIBUTES
when accessing an index. You cannot use both\n Select
and ProjectionExpression
together in a single\n request, unless the value for Select
is SPECIFIC_ATTRIBUTES
.\n (This usage is equivalent to specifying ProjectionExpression
without any\n value for Select
.)
If you use the ProjectionExpression
parameter, then the value for\n Select
can only be SPECIFIC_ATTRIBUTES
. Any other\n value for Select
will return an error.
A string that contains conditions that DynamoDB applies after the Scan
\n operation, but before the data is returned to you. Items that do not satisfy the\n FilterExpression
criteria are not returned.
A FilterExpression
is applied after the items have already been read;\n the process of filtering does not consume any additional read capacity units.
For more information, see Filter Expressions in the Amazon DynamoDB Developer\n Guide.
" + "smithy.api#documentation": "A string that contains conditions that DynamoDB applies after the Scan
\n operation, but before the data is returned to you. Items that do not satisfy the\n FilterExpression
criteria are not returned.
A FilterExpression
is applied after the items have already been read;\n the process of filtering does not consume any additional read capacity units.
For more information, see Filter\n Expressions in the Amazon DynamoDB Developer\n Guide.
" } }, "ExpressionAttributeNames": { @@ -10441,7 +10441,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "The capacity units consumed by the Scan
operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity
is only\n returned if the ReturnConsumedCapacity
parameter was specified. For more\n information, see \n Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide.
The capacity units consumed by the Scan
operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity
is only\n returned if the ReturnConsumedCapacity
parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon\n DynamoDB Developer Guide.
The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this\n parameter.
", "smithy.api#required": {} } }, @@ -11744,7 +11744,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table. You can also provide the Amazon Resource Name (ARN) of the table in this\n parameter.
", "smithy.api#required": {} } }, @@ -11854,7 +11854,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "Adds or removes replicas in the specified global table. The global table must already\n exist to be able to use this operation. Any replica to be added must be empty, have the\n same name as the global table, have the same key schema, have DynamoDB Streams enabled,\n and have the same provisioned and maximum write capacity units.
\nThis documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).
\nTo determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.
\n\n For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). If you are using global tables Version\n 2019.11.21 you can use UpdateTable instead.\n
\n\n Although you can use UpdateGlobalTable
to add replicas and remove\n replicas in a single request, for simplicity we recommend that you issue separate\n requests for adding or removing replicas.\n
If global secondary indexes are specified, then the following conditions must also be\n met:
\nThe global secondary indexes must have the same name.
\nThe global secondary indexes must have the same hash key and sort key (if\n present).
\nThe global secondary indexes must have the same provisioned and maximum write\n capacity units.
\nAdds or removes replicas in the specified global table. The global table must already\n exist to be able to use this operation. Any replica to be added must be empty, have the\n same name as the global table, have the same key schema, have DynamoDB Streams enabled,\n and have the same provisioned and maximum write capacity units.
\nThis documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).
\nTo determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.
\nFor global tables, this operation only applies to global tables using Version\n 2019.11.21 (Current version). If you are using global tables Version\n 2019.11.21 you can use UpdateTable instead.
\n Although you can use UpdateGlobalTable
to add replicas and remove\n replicas in a single request, for simplicity we recommend that you issue separate\n requests for adding or removing replicas.
If global secondary indexes are specified, then the following conditions must also be\n met:
\nThe global secondary indexes must have the same name.
\nThe global secondary indexes must have the same hash key and sort key (if\n present).
\nThe global secondary indexes must have the same provisioned and maximum write\n capacity units.
\nThe name of the table containing the item to update. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table containing the item to update. You can also provide the\n Amazon Resource Name (ARN) of the table in this parameter.
", "smithy.api#required": {} } }, @@ -12122,7 +12122,7 @@ "ReturnValues": { "target": "com.amazonaws.dynamodb#ReturnValue", "traits": { - "smithy.api#documentation": "Use ReturnValues
if you want to get the item attributes as they appear\n before or after they are successfully updated. For UpdateItem
, the valid values\n are:
\n NONE
- If ReturnValues
is not specified, or if its\n value is NONE
, then nothing is returned. (This setting is the\n default for ReturnValues
.)
\n ALL_OLD
- Returns all of the attributes of the item, as they\n appeared before the UpdateItem operation.
\n UPDATED_OLD
- Returns only the updated attributes, as they appeared\n before the UpdateItem operation.
\n ALL_NEW
- Returns all of the attributes of the item, as they appear\n after the UpdateItem operation.
\n UPDATED_NEW
- Returns only the updated attributes, as they appear\n after the UpdateItem operation.
There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.
\nThe values returned are strongly consistent.
" + "smithy.api#documentation": "Use ReturnValues
if you want to get the item attributes as they appear\n before or after they are successfully updated. For UpdateItem
, the valid\n values are:
\n NONE
- If ReturnValues
is not specified, or if its\n value is NONE
, then nothing is returned. (This setting is the\n default for ReturnValues
.)
\n ALL_OLD
- Returns all of the attributes of the item, as they\n appeared before the UpdateItem operation.
\n UPDATED_OLD
- Returns only the updated attributes, as they appeared\n before the UpdateItem operation.
\n ALL_NEW
- Returns all of the attributes of the item, as they appear\n after the UpdateItem operation.
\n UPDATED_NEW
- Returns only the updated attributes, as they appear\n after the UpdateItem operation.
There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.
\nThe values returned are strongly consistent.
" } }, "ReturnConsumedCapacity": { @@ -12161,7 +12161,7 @@ "ReturnValuesOnConditionCheckFailure": { "target": "com.amazonaws.dynamodb#ReturnValuesOnConditionCheckFailure", "traits": { - "smithy.api#documentation": "An optional parameter that returns the item attributes for an UpdateItem
operation that failed a\n condition check.
There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.
" + "smithy.api#documentation": "An optional parameter that returns the item attributes for an UpdateItem
\n operation that failed a condition check.
There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.
" } } }, @@ -12176,13 +12176,13 @@ "Attributes": { "target": "com.amazonaws.dynamodb#AttributeMap", "traits": { - "smithy.api#documentation": "A map of attribute values as they appear before or after the UpdateItem
\n operation, as determined by the ReturnValues
parameter.
The Attributes
map is only present if the update was successful and ReturnValues
was\n specified as something other than NONE
in the request. Each element\n represents one attribute.
A map of attribute values as they appear before or after the UpdateItem
\n operation, as determined by the ReturnValues
parameter.
The Attributes
map is only present if the update was successful and\n ReturnValues
was specified as something other than NONE
in\n the request. Each element represents one attribute.
The capacity units consumed by the UpdateItem
operation. The data\n returned includes the total provisioned throughput consumed, along with statistics for\n the table and any indexes involved in the operation. ConsumedCapacity
is\n only returned if the ReturnConsumedCapacity
parameter was specified. For\n more information, see Capacity unit consumption for write operations in the Amazon DynamoDB Developer\n Guide.
The capacity units consumed by the UpdateItem
operation. The data\n returned includes the total provisioned throughput consumed, along with statistics for\n the table and any indexes involved in the operation. ConsumedCapacity
is\n only returned if the ReturnConsumedCapacity
parameter was specified. For\n more information, see Capacity unit consumption for write operations in the Amazon\n DynamoDB Developer Guide.
The table name for the Kinesis streaming destination input. You can also provide the ARN of the table in this parameter.
", + "smithy.api#documentation": "The table name for the Kinesis streaming destination input. You can also provide the\n ARN of the table in this parameter.
", "smithy.api#required": {} } }, @@ -12377,7 +12377,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB\n Streams settings for a given table.
\nFor global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n
\nYou can only perform one of the following operations at once:
\nModify the provisioned throughput settings of the table.
\nRemove a global secondary index from the table.
\nCreate a new global secondary index on the table. After the index begins\n backfilling, you can use UpdateTable
to perform other\n operations.
\n UpdateTable
is an asynchronous operation; while it's executing, the table\n status changes from ACTIVE
to UPDATING
. While it's\n UPDATING
, you can't issue another UpdateTable
request.\n When the table returns to the ACTIVE
state, the UpdateTable
\n operation is complete.
Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB\n Streams settings for a given table.
\nFor global tables, this operation only applies to global tables using Version\n 2019.11.21 (Current version).
\nYou can only perform one of the following operations at once:
\nModify the provisioned throughput settings of the table.
\nRemove a global secondary index from the table.
\nCreate a new global secondary index on the table. After the index begins\n backfilling, you can use UpdateTable
to perform other\n operations.
\n UpdateTable
is an asynchronous operation; while it's executing, the table\n status changes from ACTIVE
to UPDATING
. While it's\n UPDATING
, you can't issue another UpdateTable
request.\n When the table returns to the ACTIVE
state, the UpdateTable
\n operation is complete.
The name of the table to be updated. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table to be updated. You can also provide the Amazon Resource Name (ARN) of the table\n in this parameter.
", "smithy.api#required": {} } }, @@ -12429,7 +12429,7 @@ "ReplicaUpdates": { "target": "com.amazonaws.dynamodb#ReplicationGroupUpdateList", "traits": { - "smithy.api#documentation": "A list of replica update actions (create, delete, or update) for the table.
\nFor global tables, this property only applies to global tables using Version 2019.11.21 (Current version). \n
\nA list of replica update actions (create, delete, or update) for the table.
\nFor global tables, this property only applies to global tables using Version\n 2019.11.21 (Current version).
\nIndicates whether deletion protection is to be enabled (true) or disabled (false) on the table.
" + "smithy.api#documentation": "Indicates whether deletion protection is to be enabled (true) or disabled (false) on\n the table.
" } }, "OnDemandThroughput": { "target": "com.amazonaws.dynamodb#OnDemandThroughput", "traits": { - "smithy.api#documentation": "Updates the maximum number of read and write units for the specified table in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits
, MaxWriteRequestUnits
, or both.
Updates the maximum number of read and write units for the specified table in\n on-demand capacity mode. If you use this parameter, you must specify\n MaxReadRequestUnits
, MaxWriteRequestUnits
, or both.
Updates auto scaling settings on your global tables at once.
\nFor global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n
\nUpdates auto scaling settings on your global tables at once.
\nFor global tables, this operation only applies to global tables using Version\n 2019.11.21 (Current version).
\nThe name of the global table to be updated. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the global table to be updated. You can also provide the Amazon Resource Name (ARN) of the\n table in this parameter.
", "smithy.api#required": {} } }, @@ -12579,7 +12579,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "The name of the table to be configured. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
", + "smithy.api#documentation": "The name of the table to be configured. You can also provide the Amazon Resource Name (ARN) of the\n table in this parameter.
", "smithy.api#required": {} } }, diff --git a/models/ec2.json b/models/ec2.json index 20f2ebe277..7e0de0da2b 100644 --- a/models/ec2.json +++ b/models/ec2.json @@ -2303,6 +2303,9 @@ { "target": "com.amazonaws.ec2#CreateCapacityReservation" }, + { + "target": "com.amazonaws.ec2#CreateCapacityReservationBySplitting" + }, { "target": "com.amazonaws.ec2#CreateCapacityReservationFleet" }, @@ -3809,6 +3812,9 @@ { "target": "com.amazonaws.ec2#MoveByoipCidrToIpam" }, + { + "target": "com.amazonaws.ec2#MoveCapacityReservationInstances" + }, { "target": "com.amazonaws.ec2#ProvisionByoipCidr" }, @@ -5353,7 +5359,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "DestinationPrefixListId", - "smithy.api#documentation": "The prefix of the Amazon Web Service.
", + "smithy.api#documentation": "The prefix of the Amazon Web Services service.
", "smithy.api#xmlName": "destinationPrefixListId" } }, @@ -13908,6 +13914,94 @@ "smithy.api#documentation": "Creates a new Capacity Reservation with the specified attributes.
\nCapacity Reservations enable you to reserve capacity for your Amazon EC2 instances in a specific Availability Zone for any duration. This \n\t\t\tgives you the flexibility to selectively add capacity reservations and still get the Regional RI discounts for that usage. \n\t\t\tBy creating Capacity Reservations, you ensure that you always have access to Amazon EC2 capacity when you need it, for as long as you need it. \n\t\t\tFor more information, see Capacity Reservations in the Amazon EC2 User Guide.
\nYour request to create a Capacity Reservation could fail if Amazon EC2 does not have sufficient capacity to\n\t\t\tfulfill the request. If your request fails due to Amazon EC2 capacity constraints, either try\n\t\t\tagain at a later time, try in a different Availability Zone, or request a smaller\n\t\t\tcapacity reservation. If your application is flexible across instance types and sizes,\n\t\t\ttry to create a Capacity Reservation with different instance attributes.
\nYour request could also fail if the requested quantity exceeds your On-Demand Instance\n\t\t\tlimit for the selected instance type. If your request fails due to limit constraints,\n\t\t\tincrease your On-Demand Instance limit for the required instance type and try again. For\n\t\t\tmore information about increasing your instance limits, see Amazon EC2 Service\n\t\t\t\tQuotas in the Amazon EC2 User Guide.
" } }, + "com.amazonaws.ec2#CreateCapacityReservationBySplitting": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#CreateCapacityReservationBySplittingRequest" + }, + "output": { + "target": "com.amazonaws.ec2#CreateCapacityReservationBySplittingResult" + }, + "traits": { + "smithy.api#documentation": "\n\t\t\tCreate a new Capacity Reservation by splitting the available capacity of the source Capacity Reservation. The new Capacity Reservation will have the same attributes as the source Capacity Reservation except for tags. The source Capacity Reservation must be active
and owned by your Amazon Web Services account.\n\t\t
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.
", + "smithy.api#idempotencyToken": {} + } + }, + "SourceCapacityReservationId": { + "target": "com.amazonaws.ec2#CapacityReservationId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "\n\t\t\tThe ID of the Capacity Reservation from which you want to split the available capacity. \n\t\t
", + "smithy.api#required": {} + } + }, + "InstanceCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "\n\t\t\tThe number of instances to split from the source Capacity Reservation.\n\t\t
", + "smithy.api#required": {} + } + }, + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "\n\t\t\tThe tags to apply to the new Capacity Reservation.\n\t\t
", + "smithy.api#xmlName": "TagSpecification" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#CreateCapacityReservationBySplittingResult": { + "type": "structure", + "members": { + "SourceCapacityReservation": { + "target": "com.amazonaws.ec2#CapacityReservation", + "traits": { + "aws.protocols#ec2QueryName": "SourceCapacityReservation", + "smithy.api#documentation": "\n\t\t\tInformation about the source Capacity Reservation.\n\t\t
", + "smithy.api#xmlName": "sourceCapacityReservation" + } + }, + "DestinationCapacityReservation": { + "target": "com.amazonaws.ec2#CapacityReservation", + "traits": { + "aws.protocols#ec2QueryName": "DestinationCapacityReservation", + "smithy.api#documentation": "\n\t\t\tInformation about the destination Capacity Reservation.\n\t\t
", + "smithy.api#xmlName": "destinationCapacityReservation" + } + }, + "InstanceCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "InstanceCount", + "smithy.api#documentation": "\n\t\t\tThe number of instances in the new Capacity Reservation. The number of instances in the source Capacity Reservation was reduced by this amount.\n\t\t
", + "smithy.api#xmlName": "instanceCount" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#CreateCapacityReservationFleet": { "type": "operation", "input": { @@ -16156,6 +16250,12 @@ "traits": { "smithy.api#documentation": "IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab.
" } + }, + "EnablePrivateGua": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default.
" + } } }, "traits": { @@ -17603,7 +17703,7 @@ "AwsService": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "The Amazon Web Service. Currently not supported.
" + "smithy.api#documentation": "The Amazon Web Services service. Currently not supported.
" } }, "Permission": { @@ -27125,7 +27225,7 @@ "target": "com.amazonaws.ec2#DescribeAddressTransfersResult" }, "traits": { - "smithy.api#documentation": "Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.
\nWhen you transfer an Elastic IP address, there is a two-step handshake\n between the source and transfer Amazon Web Services accounts. When the source account starts the transfer,\n the transfer account has seven days to accept the Elastic IP address\n transfer. During those seven days, the source account can view the\n pending transfer by using this action. After seven days, the\n transfer expires and ownership of the Elastic IP\n address returns to the source\n account. Accepted transfers are visible to the source account for three days\n after the transfers have been accepted.
", + "smithy.api#documentation": "Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.
\nWhen you transfer an Elastic IP address, there is a two-step handshake\n between the source and transfer Amazon Web Services accounts. When the source account starts the transfer,\n the transfer account has seven days to accept the Elastic IP address\n transfer. During those seven days, the source account can view the\n pending transfer by using this action. After seven days, the\n transfer expires and ownership of the Elastic IP\n address returns to the source\n account. Accepted transfers are visible to the source account for 14 days\n after the transfers have been accepted.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -35518,7 +35618,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "One or more filters.
\n\n network-interface-permission.network-interface-permission-id
- The ID of the\n\t\t\t\tpermission.
\n network-interface-permission.network-interface-id
- The ID of\n\t\t\t\t\tthe network interface.
\n network-interface-permission.aws-account-id
- The Amazon Web Services account ID.
\n network-interface-permission.aws-service
- The Amazon Web Service.
\n network-interface-permission.permission
- The type of\n\t\t\t\t\tpermission (INSTANCE-ATTACH
|\n\t\t\t\t\tEIP-ASSOCIATE
).
One or more filters.
\n\n network-interface-permission.network-interface-permission-id
- The ID of the\n\t\t\t\tpermission.
\n network-interface-permission.network-interface-id
- The ID of\n\t\t\t\t\tthe network interface.
\n network-interface-permission.aws-account-id
- The Amazon Web Services account ID.
\n network-interface-permission.aws-service
- The Amazon Web Services service.
\n network-interface-permission.permission
- The type of\n\t\t\t\t\tpermission (INSTANCE-ATTACH
|\n\t\t\t\t\tEIP-ASSOCIATE
).
One or more filters.
\n\n association.allocation-id
- The allocation ID returned when you\n\t\t allocated the Elastic IP address (IPv4) for your network interface.
\n association.association-id
- The association ID returned when the\n\t\t network interface was associated with an IPv4 address.
\n addresses.association.owner-id
- The owner ID of the addresses associated with the network interface.
\n addresses.association.public-ip
- The association ID returned when\n\t\t the network interface was associated with the Elastic IP address\n\t\t (IPv4).
\n addresses.primary
- Whether the private IPv4 address is the primary\n IP address associated with the network interface.
\n addresses.private-ip-address
- The private IPv4 addresses\n\t\t associated with the network interface.
\n association.ip-owner-id
- The owner of the Elastic IP address\n (IPv4) associated with the network interface.
\n association.public-ip
- The address of the Elastic IP address\n (IPv4) bound to the network interface.
\n association.public-dns-name
- The public DNS name for the network\n interface (IPv4).
\n attachment.attach-time
- The time that the network interface was attached to an instance.
\n attachment.attachment-id
- The ID of the interface attachment.
\n attachment.delete-on-termination
- Indicates whether the attachment is deleted when an instance is terminated.
\n attachment.device-index
- The device index to which the network interface is attached.
\n attachment.instance-id
- The ID of the instance to which the network interface is attached.
\n attachment.instance-owner-id
- The owner ID of the instance to which the network interface is attached.
\n attachment.status
- The status of the attachment (attaching
| attached
| detaching
| detached
).
\n availability-zone
- The Availability Zone of the network interface.
\n description
- The description of the network interface.
\n group-id
- The ID of a security group associated with the network interface.
\n ipv6-addresses.ipv6-address
- An IPv6 address associated with\n the network interface.
\n interface-type
- The type of network interface (api_gateway_managed
| \n\t\t aws_codestar_connections_managed
| branch
| \n\t\t ec2_instance_connect_endpoint
| efa
| efs
| \n\t\t gateway_load_balancer
| gateway_load_balancer_endpoint
| \n\t\t global_accelerator_managed
| \n\t\t interface
| iot_rules_managed
| \n\t\t lambda
| load_balancer
| \n\t\t nat_gateway
| network_load_balancer
| \n\t\t quicksight
| \n\t\t transit_gateway
| trunk
| \n\t\t vpc_endpoint
).
\n mac-address
- The MAC address of the network interface.
\n network-interface-id
- The ID of the network interface.
\n owner-id
- The Amazon Web Services account ID of the network interface owner.
\n private-dns-name
- The private DNS name of the network interface (IPv4).
\n private-ip-address
- The private IPv4 address or addresses of the\n network interface.
\n requester-id
- The alias or Amazon Web Services account ID of the principal or service that created the network interface.
\n requester-managed
- Indicates whether the network interface is being managed by an Amazon Web Service \n\t\t (for example, Amazon Web Services Management Console, Auto Scaling, and so on).
\n source-dest-check
- Indicates whether the network interface performs source/destination checking. \n\t\t A value of true
means checking is enabled, and false
means checking is disabled. \n\t\t The value must be false
for the network interface to perform network address translation (NAT) in your VPC.
\n status
- The status of the network interface. If the network interface is not attached to an instance, the status is available
; \n\t\t if a network interface is attached to an instance the status is in-use
.
\n subnet-id
- The ID of the subnet for the network interface.
\n tag
:Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
\n tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
\n vpc-id
- The ID of the VPC for the network interface.
One or more filters.
\n\n association.allocation-id
- The allocation ID returned when you\n\t\t allocated the Elastic IP address (IPv4) for your network interface.
\n association.association-id
- The association ID returned when the\n\t\t network interface was associated with an IPv4 address.
\n addresses.association.owner-id
- The owner ID of the addresses associated with the network interface.
\n addresses.association.public-ip
- The association ID returned when\n\t\t the network interface was associated with the Elastic IP address\n\t\t (IPv4).
\n addresses.primary
- Whether the private IPv4 address is the primary\n IP address associated with the network interface.
\n addresses.private-ip-address
- The private IPv4 addresses\n\t\t associated with the network interface.
\n association.ip-owner-id
- The owner of the Elastic IP address\n (IPv4) associated with the network interface.
\n association.public-ip
- The address of the Elastic IP address\n (IPv4) bound to the network interface.
\n association.public-dns-name
- The public DNS name for the network\n interface (IPv4).
\n attachment.attach-time
- The time that the network interface was attached to an instance.
\n attachment.attachment-id
- The ID of the interface attachment.
\n attachment.delete-on-termination
- Indicates whether the attachment is deleted when an instance is terminated.
\n attachment.device-index
- The device index to which the network interface is attached.
\n attachment.instance-id
- The ID of the instance to which the network interface is attached.
\n attachment.instance-owner-id
- The owner ID of the instance to which the network interface is attached.
\n attachment.status
- The status of the attachment (attaching
| attached
| detaching
| detached
).
\n availability-zone
- The Availability Zone of the network interface.
\n description
- The description of the network interface.
\n group-id
- The ID of a security group associated with the network interface.
\n ipv6-addresses.ipv6-address
- An IPv6 address associated with\n the network interface.
\n interface-type
- The type of network interface (api_gateway_managed
| \n\t\t aws_codestar_connections_managed
| branch
| \n\t\t ec2_instance_connect_endpoint
| efa
| efs
| \n\t\t gateway_load_balancer
| gateway_load_balancer_endpoint
| \n\t\t global_accelerator_managed
| \n\t\t interface
| iot_rules_managed
| \n\t\t lambda
| load_balancer
| \n\t\t nat_gateway
| network_load_balancer
| \n\t\t quicksight
| \n\t\t transit_gateway
| trunk
| \n\t\t vpc_endpoint
).
\n mac-address
- The MAC address of the network interface.
\n network-interface-id
- The ID of the network interface.
\n owner-id
- The Amazon Web Services account ID of the network interface owner.
\n private-dns-name
- The private DNS name of the network interface (IPv4).
\n private-ip-address
- The private IPv4 address or addresses of the\n network interface.
\n requester-id
- The alias or Amazon Web Services account ID of the principal or service that created the network interface.
\n requester-managed
- Indicates whether the network interface is being managed by an Amazon Web Services service \n\t\t (for example, Amazon Web Services Management Console, Auto Scaling, and so on).
\n source-dest-check
- Indicates whether the network interface performs source/destination checking. \n\t\t A value of true
means checking is enabled, and false
means checking is disabled. \n\t\t The value must be false
for the network interface to perform network address translation (NAT) in your VPC.
\n status
- The status of the network interface. If the network interface is not attached to an instance, the status is available
; \n\t\t if a network interface is attached to an instance the status is in-use
.
\n subnet-id
- The ID of the subnet for the network interface.
\n tag
:Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
\n tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
\n vpc-id
- The ID of the VPC for the network interface.
The filters.
\n\n association.gateway-id
- The ID of the gateway involved in the\n\t\t association.
\n association.route-table-association-id
- The ID of an association\n ID for the route table.
\n association.route-table-id
- The ID of the route table involved in\n the association.
\n association.subnet-id
- The ID of the subnet involved in the\n association.
\n association.main
- Indicates whether the route table is the main\n route table for the VPC (true
| false
). Route tables\n that do not have an association ID are not returned in the response.
\n owner-id
- The ID of the Amazon Web Services account that owns the route table.
\n route-table-id
- The ID of the route table.
\n route.destination-cidr-block
- The IPv4 CIDR range specified in a\n route in the table.
\n route.destination-ipv6-cidr-block
- The IPv6 CIDR range specified in a route in the route table.
\n route.destination-prefix-list-id
- The ID (prefix) of the Amazon Web Service\n specified in a route in the table.
\n route.egress-only-internet-gateway-id
- The ID of an\n egress-only Internet gateway specified in a route in the route table.
\n route.gateway-id
- The ID of a gateway specified in a route in the table.
\n route.instance-id
- The ID of an instance specified in a route in the table.
\n route.nat-gateway-id
- The ID of a NAT gateway.
\n route.transit-gateway-id
- The ID of a transit gateway.
\n route.origin
- Describes how the route was created. \n CreateRouteTable
indicates that the route was automatically\n created when the route table was created; CreateRoute
indicates\n that the route was manually added to the route table;\n EnableVgwRoutePropagation
indicates that the route was\n propagated by route propagation.
\n route.state
- The state of a route in the route table\n (active
| blackhole
). The blackhole state\n indicates that the route's target isn't available (for example, the specified\n gateway isn't attached to the VPC, the specified NAT instance has been\n terminated, and so on).
\n route.vpc-peering-connection-id
- The ID of a VPC peering\n\t\t connection specified in a route in the table.
\n tag
:Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
\n tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
\n vpc-id
- The ID of the VPC for the route table.
The filters.
\n\n association.gateway-id
- The ID of the gateway involved in the\n\t\t association.
\n association.route-table-association-id
- The ID of an association\n ID for the route table.
\n association.route-table-id
- The ID of the route table involved in\n the association.
\n association.subnet-id
- The ID of the subnet involved in the\n association.
\n association.main
- Indicates whether the route table is the main\n route table for the VPC (true
| false
). Route tables\n that do not have an association ID are not returned in the response.
\n owner-id
- The ID of the Amazon Web Services account that owns the route table.
\n route-table-id
- The ID of the route table.
\n route.destination-cidr-block
- The IPv4 CIDR range specified in a\n route in the table.
\n route.destination-ipv6-cidr-block
- The IPv6 CIDR range specified in a route in the route table.
\n route.destination-prefix-list-id
- The ID (prefix) of the Amazon Web Services service\n specified in a route in the table.
\n route.egress-only-internet-gateway-id
- The ID of an\n egress-only Internet gateway specified in a route in the route table.
\n route.gateway-id
- The ID of a gateway specified in a route in the table.
\n route.instance-id
- The ID of an instance specified in a route in the table.
\n route.nat-gateway-id
- The ID of a NAT gateway.
\n route.transit-gateway-id
- The ID of a transit gateway.
\n route.origin
- Describes how the route was created. \n CreateRouteTable
indicates that the route was automatically\n created when the route table was created; CreateRoute
indicates\n that the route was manually added to the route table;\n EnableVgwRoutePropagation
indicates that the route was\n propagated by route propagation.
\n route.state
- The state of a route in the route table\n (active
| blackhole
). The blackhole state\n indicates that the route's target isn't available (for example, the specified\n gateway isn't attached to the VPC, the specified NAT instance has been\n terminated, and so on).
\n route.vpc-peering-connection-id
- The ID of a VPC peering\n\t\t connection specified in a route in the table.
\n tag
:Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
\n tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
\n vpc-id
- The ID of the VPC for the route table.
The ID of the AMI. An AMI is required to launch an instance. This parameter is only\n available for fleets of type instant
. For fleets of type maintain
\n and request
, you must specify the AMI ID in the launch template.
The ID of the AMI in the format ami-17characters00000
.
Alternatively, you can specify a Systems Manager parameter, using one of the following\n formats. The Systems Manager parameter will resolve to an AMI ID on launch.
\nTo reference a public parameter:
\n\n resolve:ssm:public-parameter\n
\n
To reference a parameter stored in the same account:
\n\n resolve:ssm:parameter-name\n
\n
\n resolve:ssm:parameter-name:version-number\n
\n
\n resolve:ssm:parameter-name:label\n
\n
To reference a parameter shared from another Amazon Web Services account:
\n\n resolve:ssm:parameter-ARN\n
\n
\n resolve:ssm:parameter-ARN:version-number\n
\n
\n resolve:ssm:parameter-ARN:label\n
\n
For more information, see Use a Systems Manager parameter instead of an AMI ID in the\n Amazon EC2 User Guide.
\nThis parameter is only available for fleets of type instant
. For fleets\n of type maintain
and request
, you must specify the AMI ID in\n the launch template.
The ID of the AMI. An AMI is required to launch an instance. This parameter is only\n available for fleets of type instant
. For fleets of type maintain
\n and request
, you must specify the AMI ID in the launch template.
The ID of the AMI in the format ami-17characters00000
.
Alternatively, you can specify a Systems Manager parameter, using one of the following\n formats. The Systems Manager parameter will resolve to an AMI ID on launch.
\nTo reference a public parameter:
\n\n resolve:ssm:public-parameter\n
\n
To reference a parameter stored in the same account:
\n\n resolve:ssm:parameter-name\n
\n
\n resolve:ssm:parameter-name:version-number\n
\n
\n resolve:ssm:parameter-name:label\n
\n
To reference a parameter shared from another Amazon Web Services account:
\n\n resolve:ssm:parameter-ARN\n
\n
\n resolve:ssm:parameter-ARN:version-number\n
\n
\n resolve:ssm:parameter-ARN:label\n
\n
For more information, see Use a Systems Manager parameter instead of an AMI ID in the\n Amazon EC2 User Guide.
\nThis parameter is only available for fleets of type instant
. For fleets\n of type maintain
and request
, you must specify the AMI ID in\n the launch template.
IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab.
", "smithy.api#xmlName": "tier" } + }, + "EnablePrivateGua": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "EnablePrivateGua", + "smithy.api#documentation": "Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default.
", + "smithy.api#xmlName": "enablePrivateGua" + } } }, "traits": { @@ -67846,6 +67977,14 @@ "smithy.api#xmlName": "resourceCidr" } }, + "IpSource": { + "target": "com.amazonaws.ec2#IpamResourceCidrIpSource", + "traits": { + "aws.protocols#ec2QueryName": "IpSource", + "smithy.api#documentation": "The source that allocated the IP address space. byoip
or amazon
indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none
indicates private space.
The CIDR for an IPAM resource.
" } }, + "com.amazonaws.ec2#IpamResourceCidrIpSource": { + "type": "enum", + "members": { + "amazon": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "amazon" + } + }, + "byoip": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "byoip" + } + }, + "none": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "none" + } + } + } + }, "com.amazonaws.ec2#IpamResourceCidrSet": { "type": "list", "member": { @@ -70039,6 +70201,23 @@ "com.amazonaws.ec2#Ipv6Address": { "type": "string" }, + "com.amazonaws.ec2#Ipv6AddressAttribute": { + "type": "enum", + "members": { + "public": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "public" + } + }, + "private": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "private" + } + } + } + }, "com.amazonaws.ec2#Ipv6AddressList": { "type": "list", "member": { @@ -74821,7 +75000,7 @@ "target": "com.amazonaws.ec2#ModifyCapacityReservationResult" }, "traits": { - "smithy.api#documentation": "Modifies a Capacity Reservation's capacity and the conditions under which it is to be released. You\n\t\t\tcannot change a Capacity Reservation's instance type, EBS optimization, instance store settings,\n\t\t\tplatform, Availability Zone, or instance eligibility. If you need to modify any of these\n\t\t\tattributes, we recommend that you cancel the Capacity Reservation, and then create a new one with\n\t\t\tthe required attributes.
" + "smithy.api#documentation": "Modifies a Capacity Reservation's capacity, instance eligibility, and the conditions under which it is to be released. You\n\t\t can't modify a Capacity Reservation's instance type, EBS optimization, platform, instance store settings, Availability Zone, or\n\t\t tenancy. If you need to modify any of these attributes, we recommend that you cancel the Capacity Reservation, and then create a new one with\n\t\t the required attributes. For more information, see Modify an active Capacity Reservation.
" } }, "com.amazonaws.ec2#ModifyCapacityReservationFleet": { @@ -74938,6 +75117,12 @@ "traits": { "smithy.api#documentation": "Reserved for future use.
" } + }, + "InstanceMatchCriteria": { + "target": "com.amazonaws.ec2#InstanceMatchCriteria", + "traits": { + "smithy.api#documentation": "\n\t\t\tThe matching criteria (instance eligibility) that you want to use in the modified Capacity Reservation. If you change the instance eligibility of an existing Capacity Reservation from targeted
to open
, \n\t\t\tany running instances that match the attributes of the Capacity Reservation, have the CapacityReservationPreference
set to open
, and \n\t\t\tare not yet running in the Capacity Reservation, will automatically use the modified Capacity Reservation.\n\t\t
To modify the instance eligibility, the Capacity Reservation must be completely idle (zero usage).
" + } } }, "traits": { @@ -76580,6 +76765,12 @@ "traits": { "smithy.api#documentation": "IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab.
" } + }, + "EnablePrivateGua": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default.
" + } } }, "traits": { @@ -80390,6 +80581,95 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#MoveCapacityReservationInstances": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#MoveCapacityReservationInstancesRequest" + }, + "output": { + "target": "com.amazonaws.ec2#MoveCapacityReservationInstancesResult" + }, + "traits": { + "smithy.api#documentation": "Move available capacity from a source Capacity Reservation to a destination Capacity Reservation. The source Capacity Reservation and the destination Capacity Reservation must be active
, owned by your Amazon Web Services account, and share the following:\n\t\t
Instance type
\nPlatform
\nAvailability Zone
\nTenancy
\nPlacement group
\nCapacity Reservation end time - At specific time
or Manually
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.
", + "smithy.api#idempotencyToken": {} + } + }, + "SourceCapacityReservationId": { + "target": "com.amazonaws.ec2#CapacityReservationId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "\n\t\t\tThe ID of the Capacity Reservation from which you want to move capacity.\n\t\t
", + "smithy.api#required": {} + } + }, + "DestinationCapacityReservationId": { + "target": "com.amazonaws.ec2#CapacityReservationId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "\n\t\t\tThe ID of the Capacity Reservation that you want to move capacity into.\n\t\t
", + "smithy.api#required": {} + } + }, + "InstanceCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The number of instances that you want to move from the source Capacity Reservation.\n\t\t
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#MoveCapacityReservationInstancesResult": { + "type": "structure", + "members": { + "SourceCapacityReservation": { + "target": "com.amazonaws.ec2#CapacityReservation", + "traits": { + "aws.protocols#ec2QueryName": "SourceCapacityReservation", + "smithy.api#documentation": "\n\t\t\tInformation about the source Capacity Reservation.\n\t\t
", + "smithy.api#xmlName": "sourceCapacityReservation" + } + }, + "DestinationCapacityReservation": { + "target": "com.amazonaws.ec2#CapacityReservation", + "traits": { + "aws.protocols#ec2QueryName": "DestinationCapacityReservation", + "smithy.api#documentation": "\n\t\t\tInformation about the destination Capacity Reservation.\n\t\t
", + "smithy.api#xmlName": "destinationCapacityReservation" + } + }, + "InstanceCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "InstanceCount", + "smithy.api#documentation": "\n\t\t\tThe number of instances that were moved from the source Capacity Reservation to the destination Capacity Reservation.\n\t\t
", + "smithy.api#xmlName": "instanceCount" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#MoveStatus": { "type": "enum", "members": { @@ -82287,7 +82567,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "AwsService", - "smithy.api#documentation": "The Amazon Web Service.
", + "smithy.api#documentation": "The Amazon Web Services service.
", "smithy.api#xmlName": "awsService" } }, @@ -84516,7 +84796,7 @@ "target": "com.amazonaws.ec2#ValueStringList", "traits": { "aws.protocols#ec2QueryName": "CidrSet", - "smithy.api#documentation": "The IP address range of the Amazon Web Service.
", + "smithy.api#documentation": "The IP address range of the Amazon Web Services service.
", "smithy.api#xmlName": "cidrSet" } }, @@ -85646,7 +85926,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The netmask length of the CIDR you would like to allocate to the public IPv4 pool.
", + "smithy.api#documentation": "The netmask length of the CIDR you would like to allocate to the public IPv4 pool. The least specific netmask length you can define is 24.
", "smithy.api#required": {} } }, @@ -88526,7 +88806,7 @@ "ImageId": { "target": "com.amazonaws.ec2#ImageId", "traits": { - "smithy.api#documentation": "The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which\n will resolve to an AMI ID on launch.
\nValid formats:
\n\n ami-17characters00000
\n
\n resolve:ssm:parameter-name
\n
\n resolve:ssm:parameter-name:version-number
\n
\n resolve:ssm:parameter-name:label
\n
\n resolve:ssm:public-parameter
\n
Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager parameter. \n If the launch template will be used by an EC2 Fleet or Spot Fleet, you must specify the AMI ID.
\nFor more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "The ID of the AMI in the format ami-17characters00000
.
Alternatively, you can specify a Systems Manager parameter, using one of the following\n formats. The Systems Manager parameter will resolve to an AMI ID on launch.
\nTo reference a public parameter:
\n\n resolve:ssm:public-parameter\n
\n
To reference a parameter stored in the same account:
\n\n resolve:ssm:parameter-name\n
\n
\n resolve:ssm:parameter-name:version-number\n
\n
\n resolve:ssm:parameter-name:label\n
\n
To reference a parameter shared from another Amazon Web Services account:
\n\n resolve:ssm:parameter-ARN\n
\n
\n resolve:ssm:parameter-ARN:version-number\n
\n
\n resolve:ssm:parameter-ARN:label\n
\n
For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.
\nIf the launch template will be used for an EC2 Fleet or Spot Fleet, note the\n following:
\nOnly EC2 Fleets of type instant
support specifying a Systems\n Manager parameter.
For EC2 Fleets of type maintain
or request
, or\n for Spot Fleets, you must specify the AMI ID.
The prefix of the Amazon Web Service.
", + "smithy.api#documentation": "The prefix of the Amazon Web Services service.
", "smithy.api#xmlName": "destinationPrefixListId" } }, @@ -98536,6 +98810,22 @@ "smithy.api#documentation": "The state of the CIDR block.
", "smithy.api#xmlName": "ipv6CidrBlockState" } + }, + "Ipv6AddressAttribute": { + "target": "com.amazonaws.ec2#Ipv6AddressAttribute", + "traits": { + "aws.protocols#ec2QueryName": "Ipv6AddressAttribute", + "smithy.api#documentation": "Public IPv6 addresses are those advertised on the internet from Amazon Web Services. Private IP addresses are not and cannot be advertised on the internet from Amazon Web Services.
", + "smithy.api#xmlName": "ipv6AddressAttribute" + } + }, + "IpSource": { + "target": "com.amazonaws.ec2#IpSource", + "traits": { + "aws.protocols#ec2QueryName": "IpSource", + "smithy.api#documentation": "The source that allocated the IP address space. byoip
or amazon
indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none
indicates private space.
The ID of the IPv6 address pool from which the IPv6 CIDR block is allocated.
", "smithy.api#xmlName": "ipv6Pool" } + }, + "Ipv6AddressAttribute": { + "target": "com.amazonaws.ec2#Ipv6AddressAttribute", + "traits": { + "aws.protocols#ec2QueryName": "Ipv6AddressAttribute", + "smithy.api#documentation": "Public IPv6 addresses are those advertised on the internet from Amazon Web Services. Private IP addresses are not and cannot be advertised on the internet from Amazon Web Services.
", + "smithy.api#xmlName": "ipv6AddressAttribute" + } + }, + "IpSource": { + "target": "com.amazonaws.ec2#IpSource", + "traits": { + "aws.protocols#ec2QueryName": "IpSource", + "smithy.api#documentation": "The source that allocated the IP address space. byoip
or amazon
indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none
indicates private space.
The registry URL of the upstream public registry to use as the source for the pull\n through cache rule. The following is the syntax to use for each supported upstream\n registry.
\nAmazon ECR Public (ecr-public
) - public.ecr.aws
\n
Docker Hub (docker-hub
) -\n registry-1.docker.io
\n
Quay (quay
) - quay.io
\n
Kubernetes (k8s
) - registry.k8s.io
\n
GitHub Container Registry (github-container-registry
) -\n ghcr.io
\n
Microsoft Azure Container Registry (azure-container-registry
) -\n
\n
GitLab Container Registry (gitlab-container-registry
) -\n registry.gitlab.com
\n
The registry URL of the upstream public registry to use as the source for the pull\n through cache rule. The following is the syntax to use for each supported upstream\n registry.
\nAmazon ECR Public (ecr-public
) - public.ecr.aws
\n
Docker Hub (docker-hub
) -\n registry-1.docker.io
\n
Quay (quay
) - quay.io
\n
Kubernetes (k8s
) - registry.k8s.io
\n
GitHub Container Registry (github-container-registry
) -\n ghcr.io
\n
Microsoft Azure Container Registry (azure-container-registry
) -\n
\n
Creates a repository creation template. This template is used to define the settings\n for repositories created by Amazon ECR on your behalf. For example, repositories created\n through pull through cache actions. For more information, see Private\n repository creation templates in the\n Amazon Elastic Container Registry User Guide.
" + } + }, + "com.amazonaws.ecr#CreateRepositoryCreationTemplateRequest": { + "type": "structure", + "members": { + "prefix": { + "target": "com.amazonaws.ecr#Prefix", + "traits": { + "smithy.api#documentation": "The repository namespace prefix to associate with the template. All repositories\n created using this namespace prefix will have the settings defined in this template\n applied. For example, a prefix of prod
would apply to all repositories\n beginning with prod/
. Similarly, a prefix of prod/team
would\n apply to all repositories beginning with prod/team/
.
To apply a template to all repositories in your registry that don't have an associated\n creation template, you can use ROOT
as the prefix.
There is always an assumed /
applied to the end of the prefix. If you\n specify ecr-public
as the prefix, Amazon ECR treats that as\n ecr-public/
. When using a pull through cache rule, the repository\n prefix you specify during rule creation is what you should specify as your\n repository creation template prefix as well.
A description for the repository creation template.
" + } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.ecr#EncryptionConfigurationForRepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "The encryption configuration to use for repositories created using the\n template.
" + } + }, + "resourceTags": { + "target": "com.amazonaws.ecr#TagList", + "traits": { + "smithy.api#documentation": "The metadata to apply to the repository to help you categorize and organize. Each tag\n consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.
" + } + }, + "imageTagMutability": { + "target": "com.amazonaws.ecr#ImageTagMutability", + "traits": { + "smithy.api#documentation": "The tag mutability setting for the repository. If this parameter is omitted, the\n default setting of MUTABLE
will be used which will allow image tags to be\n overwritten. If IMMUTABLE
is specified, all image tags within the\n repository will be immutable which will prevent them from being overwritten.
The repository policy to apply to repositories created using the template. A\n repository policy is a permissions policy associated with a repository to control access\n permissions.
" + } + }, + "lifecyclePolicy": { + "target": "com.amazonaws.ecr#LifecyclePolicyTextForRepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "The lifecycle policy to use for repositories created using the template.
" + } + }, + "appliedFor": { + "target": "com.amazonaws.ecr#RCTAppliedForList", + "traits": { + "smithy.api#documentation": "A list of enumerable strings representing the Amazon ECR repository creation scenarios that\n this template will apply towards. The two supported scenarios are\n PULL_THROUGH_CACHE
and REPLICATION
\n
The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as\n the registry that you are configuring. Amazon ECR will assume your supplied role when\n the customRoleArn is specified. When this field isn't specified, Amazon ECR will\n use the service-linked role for the repository creation template.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ecr#CreateRepositoryCreationTemplateResponse": { + "type": "structure", + "members": { + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "The registry ID associated with the request.
" + } + }, + "repositoryCreationTemplate": { + "target": "com.amazonaws.ecr#RepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "The details of the repository creation template associated with the request.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ecr#CreateRepositoryRequest": { "type": "structure", "members": { @@ -2099,6 +2242,15 @@ "smithy.api#pattern": "^arn:aws:secretsmanager:[a-zA-Z0-9-:]+:secret:ecr\\-pullthroughcache\\/[a-zA-Z0-9\\/_+=.@-]+$" } }, + "com.amazonaws.ecr#CustomRoleArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + } + } + }, "com.amazonaws.ecr#CvssScore": { "type": "structure", "members": { @@ -2472,6 +2624,67 @@ ] } }, + "com.amazonaws.ecr#DeleteRepositoryCreationTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#DeleteRepositoryCreationTemplateRequest" + }, + "output": { + "target": "com.amazonaws.ecr#DeleteRepositoryCreationTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#TemplateNotFoundException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Deletes a repository creation template.
" + } + }, + "com.amazonaws.ecr#DeleteRepositoryCreationTemplateRequest": { + "type": "structure", + "members": { + "prefix": { + "target": "com.amazonaws.ecr#Prefix", + "traits": { + "smithy.api#documentation": "The repository namespace prefix associated with the repository creation\n template.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ecr#DeleteRepositoryCreationTemplateResponse": { + "type": "structure", + "members": { + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "The registry ID associated with the request.
" + } + }, + "repositoryCreationTemplate": { + "target": "com.amazonaws.ecr#RepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "The details of the repository creation template that was deleted.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ecr#DeleteRepositoryPolicy": { "type": "operation", "input": { @@ -3056,7 +3269,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "The ID of the registry.
" + "smithy.api#documentation": "The registry ID associated with the request.
" } }, "replicationConfiguration": { @@ -3171,6 +3384,87 @@ "smithy.api#output": {} } }, + "com.amazonaws.ecr#DescribeRepositoryCreationTemplates": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#DescribeRepositoryCreationTemplatesRequest" + }, + "output": { + "target": "com.amazonaws.ecr#DescribeRepositoryCreationTemplatesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Returns details about the repository creation templates in a registry. The\n prefixes
request parameter can be used to return the details for a\n specific repository creation template.
The repository namespace prefixes associated with the repository creation templates to\n describe. If this value is not specified, all repository creation templates are\n returned.
" + } + }, + "nextToken": { + "target": "com.amazonaws.ecr#NextToken", + "traits": { + "smithy.api#documentation": "The nextToken
value returned from a previous paginated\n DescribeRepositoryCreationTemplates
request where\n maxResults
was used and the results exceeded the value of that\n parameter. Pagination continues from the end of the previous results that returned the\n nextToken
value. This value is null
when there are no more\n results to return.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe maximum number of repository results returned by\n DescribeRepositoryCreationTemplatesRequest
in paginated output. When\n this parameter is used, DescribeRepositoryCreationTemplatesRequest
only\n returns maxResults
results in a single page along with a\n nextToken
response element. The remaining results of the initial\n request can be seen by sending another\n DescribeRepositoryCreationTemplatesRequest
request with the returned\n nextToken
value. This value can be between 1 and\n 1000. If this parameter is not used, then\n DescribeRepositoryCreationTemplatesRequest
returns up to\n 100 results and a nextToken
value, if applicable.
The registry ID associated with the request.
" + } + }, + "repositoryCreationTemplates": { + "target": "com.amazonaws.ecr#RepositoryCreationTemplateList", + "traits": { + "smithy.api#documentation": "The details of the repository creation templates.
" + } + }, + "nextToken": { + "target": "com.amazonaws.ecr#NextToken", + "traits": { + "smithy.api#documentation": "The nextToken
value to include in a future\n DescribeRepositoryCreationTemplates
request. When the results of a\n DescribeRepositoryCreationTemplates
request exceed\n maxResults
, this value can be used to retrieve the next page of\n results. This value is null
when there are no more results to\n return.
The encryption type to use.
\nIf you use the KMS
encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created. For more\n information, see Protecting data using server-side\n encryption with an KMS key stored in Key Management Service (SSE-KMS) in the\n Amazon Simple Storage Service Console Developer Guide.
If you use the AES256
encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES-256 encryption algorithm. For more information, see Protecting data using\n server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the\n Amazon Simple Storage Service Console Developer Guide.
The encryption type to use.
\nIf you use the KMS
encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created. For more\n information, see Protecting data using server-side\n encryption with an KMS key stored in Key Management Service (SSE-KMS) in the\n Amazon Simple Storage Service Console Developer Guide.
If you use the AES256
encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES256 encryption algorithm. For more information, see Protecting data using\n server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the\n Amazon Simple Storage Service Console Developer Guide.
The encryption configuration for the repository. This determines how the contents of\n your repository are encrypted at rest.
\nBy default, when no encryption configuration is set or the AES256
\n encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption\n keys which encrypts your data at rest using an AES-256 encryption algorithm. This does\n not require any action on your part.
For more control over the encryption of the contents of your repository, you can use\n server-side encryption with Key Management Service key stored in Key Management Service (KMS) to encrypt your\n images. For more information, see Amazon ECR encryption at\n rest in the Amazon Elastic Container Registry User Guide.
" + "smithy.api#documentation": "The encryption configuration for the repository. This determines how the contents of\n your repository are encrypted at rest.
\nBy default, when no encryption configuration is set or the AES256
\n encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption\n keys which encrypts your data at rest using an AES256 encryption algorithm. This does\n not require any action on your part.
For more control over the encryption of the contents of your repository, you can use\n server-side encryption with Key Management Service key stored in Key Management Service (KMS) to encrypt your\n images. For more information, see Amazon ECR encryption at\n rest in the Amazon Elastic Container Registry User Guide.
" + } + }, + "com.amazonaws.ecr#EncryptionConfigurationForRepositoryCreationTemplate": { + "type": "structure", + "members": { + "encryptionType": { + "target": "com.amazonaws.ecr#EncryptionType", + "traits": { + "smithy.api#documentation": "The encryption type to use.
\nIf you use the KMS
encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created. For more\n information, see Protecting data using server-side\n encryption with an KMS key stored in Key Management Service (SSE-KMS) in the\n Amazon Simple Storage Service Console Developer Guide.
If you use the AES256
encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES256 encryption algorithm. For more information, see Protecting data using\n server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the\n Amazon Simple Storage Service Console Developer Guide.
If you use the KMS
encryption type, specify the KMS key to use for\n encryption. The full ARN of the KMS key must be specified. The key must exist in the\n same Region as the repository. If no key is specified, the default Amazon Web Services managed KMS\n key for Amazon ECR will be used.
The encryption configuration to associate with the repository creation\n template.
" } }, "com.amazonaws.ecr#EncryptionType": { @@ -3409,6 +3724,64 @@ "smithy.api#default": false } }, + "com.amazonaws.ecr#GetAccountSetting": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#GetAccountSettingRequest" + }, + "output": { + "target": "com.amazonaws.ecr#GetAccountSettingResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieves the basic scan type version name.
" + } + }, + "com.amazonaws.ecr#GetAccountSettingRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.ecr#AccountSettingName", + "traits": { + "smithy.api#documentation": "Basic scan type version name.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ecr#GetAccountSettingResponse": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.ecr#AccountSettingName", + "traits": { + "smithy.api#documentation": "Retrieves the basic scan type version name.
" + } + }, + "value": { + "target": "com.amazonaws.ecr#AccountSettingName", + "traits": { + "smithy.api#documentation": "Retrieves the value that specifies what basic scan type is being used:\n AWS_NATIVE
or CLAIR
.
The ID of the registry.
" + "smithy.api#documentation": "The registry ID associated with the request.
" } }, "policyText": { @@ -3881,7 +4254,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "The ID of the registry.
" + "smithy.api#documentation": "The registry ID associated with the request.
" } }, "scanningConfiguration": { @@ -4726,6 +5099,16 @@ } } }, + "com.amazonaws.ecr#KmsKeyForRepositoryCreationTemplate": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^$|arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key\\/[a-z0-9-]+$" + } + }, "com.amazonaws.ecr#Layer": { "type": "structure", "members": { @@ -5091,6 +5474,15 @@ } } }, + "com.amazonaws.ecr#LifecyclePolicyTextForRepositoryCreationTemplate": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 30720 + } + } + }, "com.amazonaws.ecr#LifecyclePreviewMaxResults": { "type": "integer", "traits": { @@ -5401,6 +5793,22 @@ "com.amazonaws.ecr#Platform": { "type": "string" }, + "com.amazonaws.ecr#Prefix": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*/?|ROOT)$" + } + }, + "com.amazonaws.ecr#PrefixList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#Prefix" + } + }, "com.amazonaws.ecr#ProxyEndpoint": { "type": "string" }, @@ -5509,6 +5917,74 @@ "com.amazonaws.ecr#PushTimestamp": { "type": "timestamp" }, + "com.amazonaws.ecr#PutAccountSetting": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#PutAccountSettingRequest" + }, + "output": { + "target": "com.amazonaws.ecr#PutAccountSettingResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#LimitExceededException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Allows you to change the basic scan type version by setting the name
\n parameter to either CLAIR
to AWS_NATIVE
.
Basic scan type version name.
", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.ecr#AccountSettingValue", + "traits": { + "smithy.api#documentation": "Setting value that determines what basic scan type is being used:\n AWS_NATIVE
or CLAIR
.
Retrieves the the basic scan type version name.
" + } + }, + "value": { + "target": "com.amazonaws.ecr#AccountSettingValue", + "traits": { + "smithy.api#documentation": "Retrieves the basic scan type value, either AWS_NATIVE
or\n -
.
The registry ID.
" + "smithy.api#documentation": "The registry ID associated with the request.
" } }, "policyText": { @@ -5985,7 +6461,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates or updates the replication configuration for a registry. The existing\n replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the\n PutReplicationConfiguration API is called, a service-linked IAM role is created in\n your account for the replication process. For more information, see Using\n service-linked roles for Amazon ECR in the\n Amazon Elastic Container Registry User Guide.
\nWhen configuring cross-account replication, the destination account must grant the\n source account permission to replicate. This permission is controlled using a\n registry permissions policy. For more information, see PutRegistryPolicy.
\nCreates or updates the replication configuration for a registry. The existing\n replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the\n PutReplicationConfiguration API is called, a service-linked IAM role is created in\n your account for the replication process. For more information, see Using\n service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide.\n For more information on the custom role for replication, see Creating an IAM role for replication.
\nWhen configuring cross-account replication, the destination account must grant the\n source account permission to replicate. This permission is controlled using a\n registry permissions policy. For more information, see PutRegistryPolicy.
\nThe repository namespace prefix associated with the repository creation\n template.
" + } + }, + "description": { + "target": "com.amazonaws.ecr#RepositoryTemplateDescription", + "traits": { + "smithy.api#documentation": "The description associated with the repository creation template.
" + } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.ecr#EncryptionConfigurationForRepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "The encryption configuration associated with the repository creation template.
" + } + }, + "resourceTags": { + "target": "com.amazonaws.ecr#TagList", + "traits": { + "smithy.api#documentation": "The metadata to apply to the repository to help you categorize and organize. Each tag\n consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.
" + } + }, + "imageTagMutability": { + "target": "com.amazonaws.ecr#ImageTagMutability", + "traits": { + "smithy.api#documentation": "The tag mutability setting for the repository. If this parameter is omitted, the\n default setting of MUTABLE will be used which will allow image tags to be overwritten.\n If IMMUTABLE is specified, all image tags within the repository will be immutable which\n will prevent them from being overwritten.
" + } + }, + "repositoryPolicy": { + "target": "com.amazonaws.ecr#RepositoryPolicyText", + "traits": { + "smithy.api#documentation": "he repository policy to apply to repositories created using the template. A repository\n policy is a permissions policy associated with a repository to control access\n permissions.
" + } + }, + "lifecyclePolicy": { + "target": "com.amazonaws.ecr#LifecyclePolicyTextForRepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "The lifecycle policy to use for repositories created using the template.
" + } + }, + "appliedFor": { + "target": "com.amazonaws.ecr#RCTAppliedForList", + "traits": { + "smithy.api#documentation": "A list of enumerable Strings representing the repository creation scenarios that this\n template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and\n REPLICATION
" + } + }, + "customRoleArn": { + "target": "com.amazonaws.ecr#CustomRoleArn", + "traits": { + "smithy.api#documentation": "The ARN of the role to be assumed by Amazon ECR. Amazon ECR will assume your supplied role when\n the customRoleArn is specified. When this field isn't specified, Amazon ECR will\n use the service-linked role for the repository creation template.
" + } + }, + "createdAt": { + "target": "com.amazonaws.ecr#Date", + "traits": { + "smithy.api#documentation": "The date and time, in JavaScript date format, when the repository creation template\n was created.
" + } + }, + "updatedAt": { + "target": "com.amazonaws.ecr#Date", + "traits": { + "smithy.api#documentation": "The date and time, in JavaScript date format, when the repository creation template\n was last updated.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The details of the repository creation template associated with the request.
" + } + }, + "com.amazonaws.ecr#RepositoryCreationTemplateList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#RepositoryCreationTemplate" + } + }, "com.amazonaws.ecr#RepositoryFilter": { "type": "structure", "members": { @@ -6571,6 +7150,15 @@ "target": "com.amazonaws.ecr#RepositoryScanningConfiguration" } }, + "com.amazonaws.ecr#RepositoryTemplateDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, "com.amazonaws.ecr#Resource": { "type": "structure", "members": { @@ -7290,6 +7878,30 @@ "target": "com.amazonaws.ecr#TagValue" } }, + "com.amazonaws.ecr#TemplateAlreadyExistsException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.ecr#ExceptionMessage" + } + }, + "traits": { + "smithy.api#documentation": "The repository creation template already exists. Specify a unique prefix and try\n again.
", + "smithy.api#error": "client" + } + }, + "com.amazonaws.ecr#TemplateNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.ecr#ExceptionMessage" + } + }, + "traits": { + "smithy.api#documentation": "The specified repository creation template can't be found. Verify the registry ID and\n prefix and try again.
", + "smithy.api#error": "client" + } + }, "com.amazonaws.ecr#Title": { "type": "string" }, @@ -7533,6 +8145,112 @@ "smithy.api#output": {} } }, + "com.amazonaws.ecr#UpdateRepositoryCreationTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#UpdateRepositoryCreationTemplateRequest" + }, + "output": { + "target": "com.amazonaws.ecr#UpdateRepositoryCreationTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#TemplateNotFoundException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Updates an existing repository creation template.
" + } + }, + "com.amazonaws.ecr#UpdateRepositoryCreationTemplateRequest": { + "type": "structure", + "members": { + "prefix": { + "target": "com.amazonaws.ecr#Prefix", + "traits": { + "smithy.api#documentation": "The repository namespace prefix that matches an existing repository creation template\n in the registry. All repositories created using this namespace prefix will have the\n settings defined in this template applied. For example, a prefix of prod
\n would apply to all repositories beginning with prod/
. This includes a\n repository named prod/team1
as well as a repository named\n prod/repository1
.
To apply a template to all repositories in your registry that don't have an associated\n creation template, you can use ROOT
as the prefix.
A description for the repository creation template.
" + } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.ecr#EncryptionConfigurationForRepositoryCreationTemplate" + }, + "resourceTags": { + "target": "com.amazonaws.ecr#TagList", + "traits": { + "smithy.api#documentation": "The metadata to apply to the repository to help you categorize and organize. Each tag\n consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.
" + } + }, + "imageTagMutability": { + "target": "com.amazonaws.ecr#ImageTagMutability", + "traits": { + "smithy.api#documentation": "Updates the tag mutability setting for the repository. If this parameter is omitted,\n the default setting of MUTABLE
will be used which will allow image tags to\n be overwritten. If IMMUTABLE
is specified, all image tags within the\n repository will be immutable which will prevent them from being overwritten.
Updates the repository policy created using the template. A repository policy is a\n permissions policy associated with a repository to control access permissions.
" + } + }, + "lifecyclePolicy": { + "target": "com.amazonaws.ecr#LifecyclePolicyTextForRepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "Updates the lifecycle policy associated with the specified repository creation\n template.
" + } + }, + "appliedFor": { + "target": "com.amazonaws.ecr#RCTAppliedForList", + "traits": { + "smithy.api#documentation": "Updates the list of enumerable strings representing the Amazon ECR repository creation\n scenarios that this template will apply towards. The two supported scenarios are\n PULL_THROUGH_CACHE
and REPLICATION
\n
The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as\n the registry that you are configuring. Amazon ECR will assume your supplied role when\n the customRoleArn is specified. When this field isn't specified, Amazon ECR will\n use the service-linked role for the repository creation template.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ecr#UpdateRepositoryCreationTemplateResponse": { + "type": "structure", + "members": { + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "The registry ID associated with the request.
" + } + }, + "repositoryCreationTemplate": { + "target": "com.amazonaws.ecr#RepositoryCreationTemplate", + "traits": { + "smithy.api#documentation": "The details of the repository creation template associated with the request.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ecr#UpdatedTimestamp": { "type": "timestamp" }, diff --git a/models/ecs.json b/models/ecs.json index c9ef466b67..4bf7c79d66 100644 --- a/models/ecs.json +++ b/models/ecs.json @@ -1518,7 +1518,7 @@ } }, "traits": { - "smithy.api#documentation": "An object representing the networking details for a task or service. For example\n\t\t\t\tawsvpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}
\n
An object representing the networking details for a task or service. For example\n\t\t\t\tawsVpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}
.
The details of a capacity provider strategy. A capacity provider strategy can be set\n\t\t\twhen using the RunTask or CreateCluster APIs or as\n\t\t\tthe default capacity provider strategy for a cluster with the CreateCluster API.
\nOnly capacity providers that are already associated with a cluster and have an\n\t\t\t\tACTIVE
or UPDATING
status can be used in a capacity\n\t\t\tprovider strategy. The PutClusterCapacityProviders API is used to\n\t\t\tassociate a capacity provider with a cluster.
If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateCapacityProvider API operation.
\nTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be used in a\n\t\t\tcapacity provider strategy.
With FARGATE_SPOT
, you can run interruption\n\t\t\ttolerant tasks at a rate that's discounted compared to the FARGATE
price.\n\t\t\t\tFARGATE_SPOT
runs tasks on spare compute capacity. When Amazon Web Services needs the\n\t\t\tcapacity back, your tasks are interrupted with a two-minute warning.\n\t\t\t\tFARGATE_SPOT
only supports Linux tasks with the X86_64 architecture on\n\t\t\tplatform version 1.3.0 or later.
A capacity provider strategy may contain a maximum of 6 capacity providers.
" + "smithy.api#documentation": "The details of a capacity provider strategy. A capacity provider strategy can be set\n\t\t\twhen using the RunTask or CreateCluster APIs or as\n\t\t\tthe default capacity provider strategy for a cluster with the CreateCluster API.
\nOnly capacity providers that are already associated with a cluster and have an\n\t\t\t\tACTIVE
or UPDATING
status can be used in a capacity\n\t\t\tprovider strategy. The PutClusterCapacityProviders API is used to\n\t\t\tassociate a capacity provider with a cluster.
If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateCapacityProvider API operation.
\nTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be used in a\n\t\t\tcapacity provider strategy.
With FARGATE_SPOT
, you can run interruption tolerant tasks at a rate\n\t\t\tthat's discounted compared to the FARGATE
price. FARGATE_SPOT
\n\t\t\truns tasks on spare compute capacity. When Amazon Web Services needs the capacity back, your tasks are\n\t\t\tinterrupted with a two-minute warning. FARGATE_SPOT
only supports Linux\n\t\t\ttasks with the X86_64 architecture on platform version 1.3.0 or later.
A capacity provider strategy may contain a maximum of 6 capacity providers.
" } }, "com.amazonaws.ecs#CapacityProviderStrategyItemBase": { @@ -1762,7 +1762,7 @@ } }, "traits": { - "smithy.api#documentation": "These errors are usually caused by a client action. This client action might be using\n\t\t\tan action or resource on behalf of a user that doesn't have permissions to use the\n\t\t\taction or resource. Or, it might be specifying an identifier that isn't valid.
", + "smithy.api#documentation": "These errors are usually caused by a client action. This client action might be using\n\t\t\tan action or resource on behalf of a user that doesn't have permissions to use the\n\t\t\taction or resource. Or, it might be specifying an identifier that isn't valid.
\nThe following list includes additional causes for the error:
\nThe RunTask
could not be processed because you use managed\n\t\t\t\t\tscaling and there is a capacity error because the quota of tasks in the\n\t\t\t\t\t\tPROVISIONING
per cluster has been reached. For information\n\t\t\t\t\tabout the service quotas, see Amazon ECS\n\t\t\t\t\t\tservice quotas.
The name of a container. If you're linking multiple containers together in a task\n\t\t\tdefinition, the name
of one container can be entered in the\n\t\t\t\tlinks
of another container to connect the containers.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--name
option to docker\n\t\t\trun.
The name of a container. If you're linking multiple containers together in a task\n\t\t\tdefinition, the name
of one container can be entered in the\n\t\t\t\tlinks
of another container to connect the containers.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name
in the docker create-container command and the\n\t\t\t\t--name
option to docker\n\t\t\trun.
The image used to start a container. This string is passed directly to the Docker\n\t\t\tdaemon. By default, images in the Docker Hub registry are available. Other repositories\n\t\t\tare specified with either \n repository-url/image:tag\n
or \n repository-url/image@digest\n
. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\tIMAGE
parameter of docker\n\t\t\t\trun.
When a new task starts, the Amazon ECS container agent pulls the latest version of\n\t\t\t\t\tthe specified image and tag for the container to use. However, subsequent\n\t\t\t\t\tupdates to a repository image aren't propagated to already running tasks.
\nImages in Amazon ECR repositories can be specified by either using the full\n\t\t\t\t\t\tregistry/repository:tag
or\n\t\t\t\t\t\tregistry/repository@digest
. For example,\n\t\t\t\t\t\t012345678910.dkr.ecr.
\n\t\t\t\t\tor\n\t\t\t\t\t\t012345678910.dkr.ecr.
.\n\t\t\t\t
Images in official repositories on Docker Hub use a single name (for example,\n\t\t\t\t\t\tubuntu
or mongo
).
Images in other repositories on Docker Hub are qualified with an organization\n\t\t\t\t\tname (for example, amazon/amazon-ecs-agent
).
Images in other online repositories are qualified further by a domain name\n\t\t\t\t\t(for example, quay.io/assemblyline/ubuntu
).
The image used to start a container. This string is passed directly to the Docker\n\t\t\tdaemon. By default, images in the Docker Hub registry are available. Other repositories\n\t\t\tare specified with either \n repository-url/image:tag\n
or \n repository-url/image@digest\n
. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the docker create-container command and the\n\t\t\t\tIMAGE
parameter of docker\n\t\t\t\trun.
When a new task starts, the Amazon ECS container agent pulls the latest version of\n\t\t\t\t\tthe specified image and tag for the container to use. However, subsequent\n\t\t\t\t\tupdates to a repository image aren't propagated to already running tasks.
\nImages in Amazon ECR repositories can be specified by either using the full\n\t\t\t\t\t\tregistry/repository:tag
or\n\t\t\t\t\t\tregistry/repository@digest
. For example,\n\t\t\t\t\t\t012345678910.dkr.ecr.
\n\t\t\t\t\tor\n\t\t\t\t\t\t012345678910.dkr.ecr.
.\n\t\t\t\t
Images in official repositories on Docker Hub use a single name (for example,\n\t\t\t\t\t\tubuntu
or mongo
).
Images in other repositories on Docker Hub are qualified with an organization\n\t\t\t\t\tname (for example, amazon/amazon-ecs-agent
).
Images in other online repositories are qualified further by a domain name\n\t\t\t\t\t(for example, quay.io/assemblyline/ubuntu
).
The number of cpu
units reserved for the container. This parameter maps\n\t\t\tto CpuShares
in the Create a container section of the\n\t\t\tDocker Remote API and the --cpu-shares
option to docker run.
This field is optional for tasks using the Fargate launch type, and the\n\t\t\tonly requirement is that the total amount of CPU reserved for all containers within a\n\t\t\ttask be lower than the task-level cpu
value.
You can determine the number of CPU units that are available per EC2 instance type\n\t\t\t\tby multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page\n\t\t\t\tby 1,024.
\nLinux containers share unallocated CPU units with other containers on the container\n\t\t\tinstance with the same ratio as their allocated amount. For example, if you run a\n\t\t\tsingle-container task on a single-core instance type with 512 CPU units specified for\n\t\t\tthat container, and that's the only task running on the container instance, that\n\t\t\tcontainer could use the full 1,024 CPU unit share at any given time. However, if you\n\t\t\tlaunched another copy of the same task on that container instance, each task is\n\t\t\tguaranteed a minimum of 512 CPU units when needed. Moreover, each container could float\n\t\t\tto higher CPU usage if the other container was not using it. If both tasks were 100%\n\t\t\tactive all of the time, they would be limited to 512 CPU units.
\nOn Linux container instances, the Docker daemon on the container instance uses the CPU\n\t\t\tvalue to calculate the relative CPU share ratios for running containers. For more\n\t\t\tinformation, see CPU share\n\t\t\t\tconstraint in the Docker documentation. The minimum valid CPU share value\n\t\t\tthat the Linux kernel allows is 2. However, the CPU parameter isn't required, and you\n\t\t\tcan use CPU values below 2 in your container definitions. For CPU values below 2\n\t\t\t(including null), the behavior varies based on your Amazon ECS container agent\n\t\t\tversion:
\n\n Agent versions less than or equal to 1.1.0:\n\t\t\t\t\tNull and zero CPU values are passed to Docker as 0, which Docker then converts\n\t\t\t\t\tto 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux\n\t\t\t\t\tkernel converts to two CPU shares.
\n\n Agent versions greater than or equal to 1.2.0:\n\t\t\t\t\tNull, zero, and CPU values of 1 are passed to Docker as 2.
\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a\n\t\t\tquota. Windows containers only have access to the specified amount of CPU that's\n\t\t\tdescribed in the task definition. A null or zero CPU value is passed to Docker as\n\t\t\t\t0
, which Windows interprets as 1% of one CPU.
The number of cpu
units reserved for the container. This parameter maps\n\t\t\tto CpuShares
in the docker create-container command and the --cpu-shares
option to docker run.
This field is optional for tasks using the Fargate launch type, and the\n\t\t\tonly requirement is that the total amount of CPU reserved for all containers within a\n\t\t\ttask be lower than the task-level cpu
value.
You can determine the number of CPU units that are available per EC2 instance type\n\t\t\t\tby multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page\n\t\t\t\tby 1,024.
\nLinux containers share unallocated CPU units with other containers on the container\n\t\t\tinstance with the same ratio as their allocated amount. For example, if you run a\n\t\t\tsingle-container task on a single-core instance type with 512 CPU units specified for\n\t\t\tthat container, and that's the only task running on the container instance, that\n\t\t\tcontainer could use the full 1,024 CPU unit share at any given time. However, if you\n\t\t\tlaunched another copy of the same task on that container instance, each task is\n\t\t\tguaranteed a minimum of 512 CPU units when needed. Moreover, each container could float\n\t\t\tto higher CPU usage if the other container was not using it. If both tasks were 100%\n\t\t\tactive all of the time, they would be limited to 512 CPU units.
\nOn Linux container instances, the Docker daemon on the container instance uses the CPU\n\t\t\tvalue to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value\n\t\t\tthat the Linux kernel allows is 2, and the\n\t\t\tmaximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you\n\t\t\tcan use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2\n\t\t\t(including null) or above 262144, the behavior varies based on your Amazon ECS container agent\n\t\t\tversion:
\n\n Agent versions less than or equal to 1.1.0:\n\t\t\t\t\tNull and zero CPU values are passed to Docker as 0, which Docker then converts\n\t\t\t\t\tto 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux\n\t\t\t\t\tkernel converts to two CPU shares.
\n\n Agent versions greater than or equal to 1.2.0:\n\t\t\t\t\tNull, zero, and CPU values of 1 are passed to Docker as 2.
\n\n Agent versions greater than or equal to\n\t\t\t\t\t\t1.84.0: CPU values greater than 256 vCPU are passed to Docker as\n\t\t\t\t\t256, which is equivalent to 262144 CPU shares.
\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a\n\t\t\tquota. Windows containers only have access to the specified amount of CPU that's\n\t\t\tdescribed in the task definition. A null or zero CPU value is passed to Docker as\n\t\t\t\t0
, which Windows interprets as 1% of one CPU.
The amount (in MiB) of memory to present to the container. If your container attempts\n\t\t\tto exceed the memory specified here, the container is killed. The total amount of memory\n\t\t\treserved for all containers within a task must be lower than the task\n\t\t\t\tmemory
value, if one is specified. This parameter maps to\n\t\t\t\tMemory
in the Create a container section of the\n\t\t\tDocker Remote API and the --memory
option to docker run.
If using the Fargate launch type, this parameter is optional.
\nIf using the EC2 launch type, you must specify either a task-level\n\t\t\tmemory value or a container-level memory value. If you specify both a container-level\n\t\t\t\tmemory
and memoryReservation
value, memory
\n\t\t\tmust be greater than memoryReservation
. If you specify\n\t\t\t\tmemoryReservation
, then that value is subtracted from the available\n\t\t\tmemory resources for the container instance where the container is placed. Otherwise,\n\t\t\tthe value of memory
is used.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.
\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.
" + "smithy.api#documentation": "The amount (in MiB) of memory to present to the container. If your container attempts\n\t\t\tto exceed the memory specified here, the container is killed. The total amount of memory\n\t\t\treserved for all containers within a task must be lower than the task\n\t\t\t\tmemory
value, if one is specified. This parameter maps to\n\t\t\tMemory
in thethe docker create-container command and the --memory
option to docker run.
If using the Fargate launch type, this parameter is optional.
\nIf using the EC2 launch type, you must specify either a task-level\n\t\t\tmemory value or a container-level memory value. If you specify both a container-level\n\t\t\t\tmemory
and memoryReservation
value, memory
\n\t\t\tmust be greater than memoryReservation
. If you specify\n\t\t\t\tmemoryReservation
, then that value is subtracted from the available\n\t\t\tmemory resources for the container instance where the container is placed. Otherwise,\n\t\t\tthe value of memory
is used.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.
\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.
" } }, "memoryReservation": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "The soft limit (in MiB) of memory to reserve for the container. When system memory is\n\t\t\tunder heavy contention, Docker attempts to keep the container memory to this soft limit.\n\t\t\tHowever, your container can consume more memory when it needs to, up to either the hard\n\t\t\tlimit specified with the memory
parameter (if applicable), or all of the\n\t\t\tavailable memory on the container instance, whichever comes first. This parameter maps\n\t\t\tto MemoryReservation
in the Create a container section of\n\t\t\tthe Docker Remote API and the --memory-reservation
option to docker run.
If a task-level memory value is not specified, you must specify a non-zero integer for\n\t\t\tone or both of memory
or memoryReservation
in a container\n\t\t\tdefinition. If you specify both, memory
must be greater than\n\t\t\t\tmemoryReservation
. If you specify memoryReservation
, then\n\t\t\tthat value is subtracted from the available memory resources for the container instance\n\t\t\twhere the container is placed. Otherwise, the value of memory
is\n\t\t\tused.
For example, if your container normally uses 128 MiB of memory, but occasionally\n\t\t\tbursts to 256 MiB of memory for short periods of time, you can set a\n\t\t\t\tmemoryReservation
of 128 MiB, and a memory
hard limit of\n\t\t\t300 MiB. This configuration would allow the container to only reserve 128 MiB of memory\n\t\t\tfrom the remaining resources on the container instance, but also allow the container to\n\t\t\tconsume more memory resources when needed.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.
\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.
" + "smithy.api#documentation": "The soft limit (in MiB) of memory to reserve for the container. When system memory is\n\t\t\tunder heavy contention, Docker attempts to keep the container memory to this soft limit.\n\t\t\tHowever, your container can consume more memory when it needs to, up to either the hard\n\t\t\tlimit specified with the memory
parameter (if applicable), or all of the\n\t\t\tavailable memory on the container instance, whichever comes first. This parameter maps\n\t\t\tto MemoryReservation
in the the docker create-container command and the --memory-reservation
option to docker run.
If a task-level memory value is not specified, you must specify a non-zero integer for\n\t\t\tone or both of memory
or memoryReservation
in a container\n\t\t\tdefinition. If you specify both, memory
must be greater than\n\t\t\t\tmemoryReservation
. If you specify memoryReservation
, then\n\t\t\tthat value is subtracted from the available memory resources for the container instance\n\t\t\twhere the container is placed. Otherwise, the value of memory
is\n\t\t\tused.
For example, if your container normally uses 128 MiB of memory, but occasionally\n\t\t\tbursts to 256 MiB of memory for short periods of time, you can set a\n\t\t\t\tmemoryReservation
of 128 MiB, and a memory
hard limit of\n\t\t\t300 MiB. This configuration would allow the container to only reserve 128 MiB of memory\n\t\t\tfrom the remaining resources on the container instance, but also allow the container to\n\t\t\tconsume more memory resources when needed.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.
\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.
" } }, "links": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "The links
parameter allows containers to communicate with each other\n\t\t\twithout the need for port mappings. This parameter is only supported if the network mode\n\t\t\tof a task definition is bridge
. The name:internalName
\n\t\t\tconstruct is analogous to name:alias
in Docker links.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to\n\t\t\t\tLegacy container links\n\t\t\tin the Docker documentation. This parameter maps to Links
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--link
option to docker\n\t\t\trun.
This parameter is not supported for Windows containers.
\nContainers that are collocated on a single container instance may be able to\n\t\t\t\tcommunicate with each other without requiring links or host port mappings. Network\n\t\t\t\tisolation is achieved on the container instance using security groups and VPC\n\t\t\t\tsettings.
\nThe links
parameter allows containers to communicate with each other\n\t\t\twithout the need for port mappings. This parameter is only supported if the network mode\n\t\t\tof a task definition is bridge
. The name:internalName
\n\t\t\tconstruct is analogous to name:alias
in Docker links.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to Links
in the docker create-container command and the\n\t\t\t\t--link
option to docker\n\t\t\trun.
This parameter is not supported for Windows containers.
\nContainers that are collocated on a single container instance may be able to\n\t\t\t\tcommunicate with each other without requiring links or host port mappings. Network\n\t\t\t\tisolation is achieved on the container instance using security groups and VPC\n\t\t\t\tsettings.
\nThe list of port mappings for the container. Port mappings allow containers to access\n\t\t\tports on the host container instance to send or receive traffic.
\nFor task definitions that use the awsvpc
network mode, only specify the\n\t\t\t\tcontainerPort
. The hostPort
can be left blank or it must\n\t\t\tbe the same value as the containerPort
.
Port mappings on Windows use the NetNAT
gateway address rather than\n\t\t\t\tlocalhost
. There's no loopback for port mappings on Windows, so you\n\t\t\tcan't access a container's mapped port from the host itself.
This parameter maps to PortBindings
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--publish
option to docker\n\t\t\t\trun. If the network mode of a task definition is set to none
,\n\t\t\tthen you can't specify port mappings. If the network mode of a task definition is set to\n\t\t\t\thost
, then host ports must either be undefined or they must match the\n\t\t\tcontainer port in the port mapping.
After a task reaches the RUNNING
status, manual and automatic host\n\t\t\t\tand container port assignments are visible in the Network\n\t\t\t\t\tBindings section of a container description for a selected task in\n\t\t\t\tthe Amazon ECS console. The assignments are also visible in the\n\t\t\t\t\tnetworkBindings
section DescribeTasks\n\t\t\t\tresponses.
The list of port mappings for the container. Port mappings allow containers to access\n\t\t\tports on the host container instance to send or receive traffic.
\nFor task definitions that use the awsvpc
network mode, only specify the\n\t\t\t\tcontainerPort
. The hostPort
can be left blank or it must\n\t\t\tbe the same value as the containerPort
.
Port mappings on Windows use the NetNAT
gateway address rather than\n\t\t\t\tlocalhost
. There's no loopback for port mappings on Windows, so you\n\t\t\tcan't access a container's mapped port from the host itself.
This parameter maps to PortBindings
in the\n\t\t\tthe docker create-container command and the\n\t\t\t\t--publish
option to docker\n\t\t\t\trun. If the network mode of a task definition is set to none
,\n\t\t\tthen you can't specify port mappings. If the network mode of a task definition is set to\n\t\t\t\thost
, then host ports must either be undefined or they must match the\n\t\t\tcontainer port in the port mapping.
After a task reaches the RUNNING
status, manual and automatic host\n\t\t\t\tand container port assignments are visible in the Network\n\t\t\t\t\tBindings section of a container description for a selected task in\n\t\t\t\tthe Amazon ECS console. The assignments are also visible in the\n\t\t\t\t\tnetworkBindings
section DescribeTasks\n\t\t\t\tresponses.
If the essential
parameter of a container is marked as true
,\n\t\t\tand that container fails or stops for any reason, all other containers that are part of\n\t\t\tthe task are stopped. If the essential
parameter of a container is marked\n\t\t\tas false
, its failure doesn't affect the rest of the containers in a task.\n\t\t\tIf this parameter is omitted, a container is assumed to be essential.
All tasks must have at least one essential container. If you have an application\n\t\t\tthat's composed of multiple containers, group containers that are used for a common\n\t\t\tpurpose into components, and separate the different components into multiple task\n\t\t\tdefinitions. For more information, see Application\n\t\t\t\tArchitecture in the Amazon Elastic Container Service Developer Guide.
" } }, + "restartPolicy": { + "target": "com.amazonaws.ecs#ContainerRestartPolicy", + "traits": { + "smithy.api#documentation": "The restart policy for a container. When you set up a restart policy, Amazon ECS can restart the container without needing to replace the\n\t\t\ttask. For more information, see Restart individual containers in Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide.
" + } + }, "entryPoint": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "Early versions of the Amazon ECS container agent don't properly handle\n\t\t\t\t\tentryPoint
parameters. If you have problems using\n\t\t\t\t\tentryPoint
, update your container agent or enter your commands and\n\t\t\t\targuments as command
array items instead.
The entry point that's passed to the container. This parameter maps to\n\t\t\t\tEntrypoint
in the Create a container section of the\n\t\t\tDocker Remote API and the --entrypoint
option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.
Early versions of the Amazon ECS container agent don't properly handle\n\t\t\t\t\tentryPoint
parameters. If you have problems using\n\t\t\t\t\tentryPoint
, update your container agent or enter your commands and\n\t\t\t\targuments as command
array items instead.
The entry point that's passed to the container. This parameter maps to\n\t\t\tEntrypoint
in tthe docker create-container command and the --entrypoint
option to docker run.
The command that's passed to the container. This parameter maps to Cmd
in\n\t\t\tthe Create a container section of the Docker Remote API and the\n\t\t\t\tCOMMAND
parameter to docker\n\t\t\t\trun. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each\n\t\t\targument is a separated string in the array.
The command that's passed to the container. This parameter maps to Cmd
in\n\t\t\tthe docker create-container command and the\n\t\t\t\tCOMMAND
parameter to docker\n\t\t\t\trun. If there are multiple arguments, each\n\t\t\targument is a separated string in the array.
The environment variables to pass to a container. This parameter maps to\n\t\t\t\tEnv
in the Create a container section of the\n\t\t\tDocker Remote API and the --env
option to docker run.
We don't recommend that you use plaintext environment variables for sensitive\n\t\t\t\tinformation, such as credential data.
\nThe environment variables to pass to a container. This parameter maps to\n\t\t\tEnv
in the docker create-container command and the --env
option to docker run.
We don't recommend that you use plaintext environment variables for sensitive\n\t\t\t\tinformation, such as credential data.
\nA list of files containing the environment variables to pass to a container. This\n\t\t\tparameter maps to the --env-file
option to docker run.
You can specify up to ten environment files. The file must have a .env
\n\t\t\tfile extension. Each line in an environment file contains an environment variable in\n\t\t\t\tVARIABLE=VALUE
format. Lines beginning with #
are treated\n\t\t\tas comments and are ignored. For more information about the environment variable file\n\t\t\tsyntax, see Declare default\n\t\t\t\tenvironment variables in file.
If there are environment variables specified using the environment
\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Specifying Environment\n\t\t\t\tVariables in the Amazon Elastic Container Service Developer Guide.
A list of files containing the environment variables to pass to a container. This\n\t\t\tparameter maps to the --env-file
option to docker run.
You can specify up to ten environment files. The file must have a .env
\n\t\t\tfile extension. Each line in an environment file contains an environment variable in\n\t\t\t\tVARIABLE=VALUE
format. Lines beginning with #
are treated\n\t\t\tas comments and are ignored.
If there are environment variables specified using the environment
\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Specifying Environment\n\t\t\t\tVariables in the Amazon Elastic Container Service Developer Guide.
The mount points for data volumes in your container.
\nThis parameter maps to Volumes
in the Create a container\n\t\t\tsection of the Docker Remote API and the --volume
option to docker run.
Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData
. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives.
The mount points for data volumes in your container.
\nThis parameter maps to Volumes
in the the docker create-container command and the --volume
option to docker run.
Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData
. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives.
Data volumes to mount from another container. This parameter maps to\n\t\t\t\tVolumesFrom
in the Create a container section of the\n\t\t\tDocker Remote API and the --volumes-from
option to docker run.
Data volumes to mount from another container. This parameter maps to\n\t\t\tVolumesFrom
in tthe docker create-container command and the --volumes-from
option to docker run.
Time duration (in seconds) to wait before giving up on resolving dependencies for a\n\t\t\tcontainer. For example, you specify two containers in a task definition with containerA\n\t\t\thaving a dependency on containerB reaching a COMPLETE
,\n\t\t\tSUCCESS
, or HEALTHY
status. If a startTimeout
\n\t\t\tvalue is specified for containerB and it doesn't reach the desired status within that\n\t\t\ttime then containerA gives up and not start. This results in the task transitioning to a\n\t\t\t\tSTOPPED
state.
When the ECS_CONTAINER_START_TIMEOUT
container agent configuration\n\t\t\t\tvariable is used, it's enforced independently from this start timeout value.
For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:
\nLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
For tasks using the EC2 launch type, your container instances require at\n\t\t\tleast version 1.26.0
of the container agent to use a container start\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1
of the ecs-init
\n\t\t\tpackage. If your container instances are launched from version 20190301
or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init
. For more information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
The valid values are 2-120 seconds.
" + "smithy.api#documentation": "Time duration (in seconds) to wait before giving up on resolving dependencies for a\n\t\t\tcontainer. For example, you specify two containers in a task definition with containerA\n\t\t\thaving a dependency on containerB reaching a COMPLETE
,\n\t\t\tSUCCESS
, or HEALTHY
status. If a startTimeout
\n\t\t\tvalue is specified for containerB and it doesn't reach the desired status within that\n\t\t\ttime then containerA gives up and not start. This results in the task transitioning to a\n\t\t\t\tSTOPPED
state.
When the ECS_CONTAINER_START_TIMEOUT
container agent configuration\n\t\t\t\tvariable is used, it's enforced independently from this start timeout value.
For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:
\nLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
For tasks using the EC2 launch type, your container instances require at\n\t\t\tleast version 1.26.0
of the container agent to use a container start\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1
of the ecs-init
\n\t\t\tpackage. If your container instances are launched from version 20190301
or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init
. For more information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
The valid values for Fargate are 2-120 seconds.
" } }, "stopTimeout": { @@ -2400,103 +2406,103 @@ "hostname": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The hostname to use for your container. This parameter maps to Hostname
\n\t\t\tin the Create a container section of the Docker Remote API and the\n\t\t\t\t--hostname
option to docker\n\t\t\t\trun.
The hostname
parameter is not supported if you're using the\n\t\t\t\t\tawsvpc
network mode.
The hostname to use for your container. This parameter maps to Hostname
\n\t\t\tin thethe docker create-container command and the\n\t\t\t\t--hostname
option to docker\n\t\t\t\trun.
The hostname
parameter is not supported if you're using the\n\t\t\t\t\tawsvpc
network mode.
The user to use inside the container. This parameter maps to User
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--user
option to docker\n\t\t\trun.
When running tasks using the host
network mode, don't run containers\n\t\t\t\tusing the root user (UID 0). We recommend using a non-root user for better\n\t\t\t\tsecurity.
You can specify the user
using the following formats. If specifying a UID\n\t\t\tor GID, you must specify it as a positive integer.
\n user
\n
\n user:group
\n
\n uid
\n
\n uid:gid
\n
\n user:gid
\n
\n uid:group
\n
This parameter is not supported for Windows containers.
\nThe user to use inside the container. This parameter maps to User
in the docker create-container command and the\n\t\t\t\t--user
option to docker\n\t\t\trun.
When running tasks using the host
network mode, don't run containers\n\t\t\t\tusing the root user (UID 0). We recommend using a non-root user for better\n\t\t\t\tsecurity.
You can specify the user
using the following formats. If specifying a UID\n\t\t\tor GID, you must specify it as a positive integer.
\n user
\n
\n user:group
\n
\n uid
\n
\n uid:gid
\n
\n user:gid
\n
\n uid:group
\n
This parameter is not supported for Windows containers.
\nThe working directory to run commands inside the container in. This parameter maps to\n\t\t\t\tWorkingDir
in the Create a container section of the\n\t\t\tDocker Remote API and the --workdir
option to docker run.
The working directory to run commands inside the container in. This parameter maps to\n\t\t\tWorkingDir
in the docker create-container command and the --workdir
option to docker run.
When this parameter is true, networking is off within the container. This parameter\n\t\t\tmaps to NetworkDisabled
in the Create a container section\n\t\t\tof the Docker Remote API.
This parameter is not supported for Windows containers.
\nWhen this parameter is true, networking is off within the container. This parameter\n\t\t\tmaps to NetworkDisabled
in the docker create-container command.
This parameter is not supported for Windows containers.
\nWhen this parameter is true, the container is given elevated privileges on the host\n\t\t\tcontainer instance (similar to the root
user). This parameter maps to\n\t\t\t\tPrivileged
in the Create a container section of the\n\t\t\tDocker Remote API and the --privileged
option to docker run.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nWhen this parameter is true, the container is given elevated privileges on the host\n\t\t\tcontainer instance (similar to the root
user). This parameter maps to\n\t\t\tPrivileged
in the docker create-container command and the --privileged
option to docker run.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nWhen this parameter is true, the container is given read-only access to its root file\n\t\t\tsystem. This parameter maps to ReadonlyRootfs
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--read-only
option to docker\n\t\t\t\trun.
This parameter is not supported for Windows containers.
\nWhen this parameter is true, the container is given read-only access to its root file\n\t\t\tsystem. This parameter maps to ReadonlyRootfs
in the docker create-container command and the\n\t\t\t\t--read-only
option to docker\n\t\t\t\trun.
This parameter is not supported for Windows containers.
\nA list of DNS servers that are presented to the container. This parameter maps to\n\t\t\t\tDns
in the Create a container section of the\n\t\t\tDocker Remote API and the --dns
option to docker run.
This parameter is not supported for Windows containers.
\nA list of DNS servers that are presented to the container. This parameter maps to\n\t\t\tDns
in the docker create-container command and the --dns
option to docker run.
This parameter is not supported for Windows containers.
\nA list of DNS search domains that are presented to the container. This parameter maps\n\t\t\tto DnsSearch
in the Create a container section of the\n\t\t\tDocker Remote API and the --dns-search
option to docker run.
This parameter is not supported for Windows containers.
\nA list of DNS search domains that are presented to the container. This parameter maps\n\t\t\tto DnsSearch
in the docker create-container command and the --dns-search
option to docker run.
This parameter is not supported for Windows containers.
\nA list of hostnames and IP address mappings to append to the /etc/hosts
\n\t\t\tfile on the container. This parameter maps to ExtraHosts
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--add-host
option to docker\n\t\t\t\trun.
This parameter isn't supported for Windows containers or tasks that use the\n\t\t\t\t\tawsvpc
network mode.
A list of hostnames and IP address mappings to append to the /etc/hosts
\n\t\t\tfile on the container. This parameter maps to ExtraHosts
in the docker create-container command and the\n\t\t\t\t--add-host
option to docker\n\t\t\t\trun.
This parameter isn't supported for Windows containers or tasks that use the\n\t\t\t\t\tawsvpc
network mode.
A list of strings to provide custom configuration for multiple security systems. For\n\t\t\tmore information about valid values, see Docker\n\t\t\t\tRun Security Configuration. This field isn't valid for containers in tasks\n\t\t\tusing the Fargate launch type.
\nFor Linux tasks on EC2, this parameter can be used to reference custom\n\t\t\tlabels for SELinux and AppArmor multi-level security systems.
\nFor any tasks on EC2, this parameter can be used to reference a\n\t\t\tcredential spec file that configures a container for Active Directory authentication.\n\t\t\tFor more information, see Using gMSAs for Windows\n\t\t\t\tContainers and Using gMSAs for Linux\n\t\t\t\tContainers in the Amazon Elastic Container Service Developer Guide.
\nThis parameter maps to SecurityOpt
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--security-opt
option to docker\n\t\t\t\trun.
The Amazon ECS container agent running on a container instance must register with the\n\t\t\t\t\tECS_SELINUX_CAPABLE=true
or ECS_APPARMOR_CAPABLE=true
\n\t\t\t\tenvironment variables before containers placed on that instance can use these\n\t\t\t\tsecurity options. For more information, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.
For more information about valid values, see Docker\n\t\t\t\tRun Security Configuration.
\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" |\n\t\t\t\"credentialspec:CredentialSpecFilePath\"
" + "smithy.api#documentation": "A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks\n\t\t\tusing the Fargate launch type.
\nFor Linux tasks on EC2, this parameter can be used to reference custom\n\t\t\tlabels for SELinux and AppArmor multi-level security systems.
\nFor any tasks on EC2, this parameter can be used to reference a\n\t\t\tcredential spec file that configures a container for Active Directory authentication.\n\t\t\tFor more information, see Using gMSAs for Windows\n\t\t\t\tContainers and Using gMSAs for Linux\n\t\t\t\tContainers in the Amazon Elastic Container Service Developer Guide.
\nThis parameter maps to SecurityOpt
in the docker create-container command and the\n\t\t\t\t--security-opt
option to docker\n\t\t\t\trun.
The Amazon ECS container agent running on a container instance must register with the\n\t\t\t\t\tECS_SELINUX_CAPABLE=true
or ECS_APPARMOR_CAPABLE=true
\n\t\t\t\tenvironment variables before containers placed on that instance can use these\n\t\t\t\tsecurity options. For more information, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.
Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" |\n\t\t\t\"credentialspec:CredentialSpecFilePath\"
" } }, "interactive": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "When this parameter is true
, you can deploy containerized applications\n\t\t\tthat require stdin
or a tty
to be allocated. This parameter\n\t\t\tmaps to OpenStdin
in the Create a container section of the\n\t\t\tDocker Remote API and the --interactive
option to docker run.
When this parameter is true
, you can deploy containerized applications\n\t\t\tthat require stdin
or a tty
to be allocated. This parameter\n\t\t\tmaps to OpenStdin
in the docker create-container command and the --interactive
option to docker run.
When this parameter is true
, a TTY is allocated. This parameter maps to\n\t\t\t\tTty
in the Create a container section of the\n\t\t\tDocker Remote API and the --tty
option to docker run.
When this parameter is true
, a TTY is allocated. This parameter maps to\n\t\t\tTty
in the docker create-container command and the --tty
option to docker run.
A key/value map of labels to add to the container. This parameter maps to\n\t\t\t\tLabels
in the Create a container section of the\n\t\t\tDocker Remote API and the --label
option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
A key/value map of labels to add to the container. This parameter maps to\n\t\t\tLabels
in the docker create-container command and the --label
option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
A list of ulimits
to set in the container. If a ulimit
value\n\t\t\tis specified in a task definition, it overrides the default values set by Docker. This\n\t\t\tparameter maps to Ulimits
in the Create a container section\n\t\t\tof the Docker Remote API and the --ulimit
option to docker run. Valid naming values are displayed\n\t\t\tin the Ulimit data type.
Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile
resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile
resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile
soft limit is 1024
and the default hard limit\n\t\t\t\t\t\t\tis 65535
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
This parameter is not supported for Windows containers.
\nA list of ulimits
to set in the container. If a ulimit
value\n\t\t\tis specified in a task definition, it overrides the default values set by Docker. This\n\t\t\tparameter maps to Ulimits
in the docker create-container command and the --ulimit
option to docker run. Valid naming values are displayed\n\t\t\tin the Ulimit data type.
Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile
resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile
resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile
soft limit is 65535
and the default hard limit\n\t\t\t\t\t\t\tis 65535
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
This parameter is not supported for Windows containers.
\nThe log configuration specification for the container.
\nThis parameter maps to LogConfig
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--log-driver
option to docker\n\t\t\t\trun. By default, containers use the same logging driver that the Docker\n\t\t\tdaemon uses. However the container can use a different logging driver than the Docker\n\t\t\tdaemon by specifying a log driver with this parameter in the container definition. To\n\t\t\tuse a different logging driver for a container, the log system must be configured\n\t\t\tproperly on the container instance (or on a different log server for remote logging\n\t\t\toptions). For more information about the options for different supported log drivers,\n\t\t\tsee Configure\n\t\t\t\tlogging drivers in the Docker documentation.
Amazon ECS currently supports a subset of the logging drivers available to the Docker\n\t\t\t\tdaemon (shown in the LogConfiguration data type). Additional log\n\t\t\t\tdrivers may be available in future releases of the Amazon ECS container agent.
\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
The Amazon ECS container agent running on a container instance must register the\n\t\t\t\tlogging drivers available on that instance with the\n\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS
environment variable before\n\t\t\t\tcontainers placed on that instance can use these log configuration options. For more\n\t\t\t\tinformation, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.
The log configuration specification for the container.
\nThis parameter maps to LogConfig
in the docker create-container command and the\n\t\t\t\t--log-driver
option to docker\n\t\t\t\trun. By default, containers use the same logging driver that the Docker\n\t\t\tdaemon uses. However the container can use a different logging driver than the Docker\n\t\t\tdaemon by specifying a log driver with this parameter in the container definition. To\n\t\t\tuse a different logging driver for a container, the log system must be configured\n\t\t\tproperly on the container instance (or on a different log server for remote logging\n\t\t\toptions).
Amazon ECS currently supports a subset of the logging drivers available to the Docker\n\t\t\t\tdaemon (shown in the LogConfiguration data type). Additional log\n\t\t\t\tdrivers may be available in future releases of the Amazon ECS container agent.
\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
The Amazon ECS container agent running on a container instance must register the\n\t\t\t\tlogging drivers available on that instance with the\n\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS
environment variable before\n\t\t\t\tcontainers placed on that instance can use these log configuration options. For more\n\t\t\t\tinformation, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.
The container health check command and associated configuration parameters for the\n\t\t\tcontainer. This parameter maps to HealthCheck
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\tHEALTHCHECK
parameter of docker\n\t\t\t\trun.
The container health check command and associated configuration parameters for the\n\t\t\tcontainer. This parameter maps to HealthCheck
in the docker create-container command and the\n\t\t\t\tHEALTHCHECK
parameter of docker\n\t\t\t\trun.
A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls
in the Create a container section of the\n\t\t\tDocker Remote API and the --sysctl
option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time
setting to maintain longer lived\n\t\t\tconnections.
A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\tSysctls
in the docker create-container command and the --sysctl
option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time
setting to maintain longer lived\n\t\t\tconnections.
Specifies whether a restart policy is enabled for the\n\t\t\tcontainer.
", + "smithy.api#required": {} + } + }, + "ignoredExitCodes": { + "target": "com.amazonaws.ecs#IntegerList", + "traits": { + "smithy.api#documentation": "A list of exit codes that Amazon ECS will ignore and not attempt a restart on. You can specify a maximum of 50 container exit\n\t\t\tcodes. By default, Amazon ECS does not ignore\n\t\t\tany exit codes.
" + } + }, + "restartAttemptPeriod": { + "target": "com.amazonaws.ecs#BoxedInteger", + "traits": { + "smithy.api#documentation": "A period of time (in seconds) that the container must run for before a restart can be attempted. A container can be\n\t\t\trestarted only once every restartAttemptPeriod
seconds. If a container isn't able to run for this time period and exits early, it will not be restarted. You can set a minimum\n\t\t\trestartAttemptPeriod
of 60 seconds and a maximum restartAttemptPeriod
of 1800 seconds.\n\t\t\tBy default, a container must run for 300 seconds before it can be restarted.
You can enable a restart policy for each container defined in your\n\t\t\ttask definition, to overcome transient failures faster and maintain task availability. When you\n\t\t\tenable a restart policy for a container, Amazon ECS can restart the container if it exits, without needing to replace\n\t\t\tthe task. For more information, see Restart individual containers\n\t\t\t\tin Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide.
" + } + }, "com.amazonaws.ecs#ContainerStateChange": { "type": "structure", "members": { @@ -3103,7 +3136,7 @@ } ], "traits": { - "smithy.api#documentation": "Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount
,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
\nIn addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
\nYou can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. volumeConfigurations
is only supported for REPLICA\n\t\t\tservice and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
Tasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING
state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING
state and are reported as\n\t\t\thealthy by the load balancer.
There are two service scheduler strategies available:
\n\n REPLICA
- The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
\n DAEMON
- The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent
is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING
state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING
state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING
state.\n\t\t\tThis is while the container instances are in the DRAINING
state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For information\n\t\t\tabout task placement and task placement strategies, see Amazon ECS\n\t\t\t\ttask placement in the Amazon Elastic Container Service Developer Guide\n
\nStarting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
", + "smithy.api#documentation": "Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount
,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
\nIn addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
\nYou can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. volumeConfigurations
is only supported for REPLICA\n\t\t\tservice and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
Tasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING
state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING
state and are reported as\n\t\t\thealthy by the load balancer.
There are two service scheduler strategies available:
\n\n REPLICA
- The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
\n DAEMON
- The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent
is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING
state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING
state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING
state.\n\t\t\tThis is while the container instances are in the DRAINING
state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For\n\t\t\tinformation about task placement and task placement strategies, see Amazon ECS\n\t\t\t\ttask placement in the Amazon Elastic Container Service Developer Guide\n
\nStarting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
", "smithy.api#examples": [ { "title": "To create a new service", @@ -3262,7 +3295,7 @@ "launchType": { "target": "com.amazonaws.ecs#LaunchType", "traits": { - "smithy.api#documentation": "The infrastructure that you run your service on. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.
\nThe FARGATE
launch type runs your tasks on Fargate On-Demand\n\t\t\tinfrastructure.
Fargate Spot infrastructure is available for use but a capacity provider\n\t\t\t\tstrategy must be used. For more information, see Fargate capacity providers in the\n\t\t\t\t\tAmazon ECS Developer Guide.
\nThe EC2
launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.
The EXTERNAL
launch type runs your tasks on your on-premises server or\n\t\t\tvirtual machine (VM) capacity registered to your cluster.
A service can use either a launch type or a capacity provider strategy. If a\n\t\t\t\tlaunchType
is specified, the capacityProviderStrategy
\n\t\t\tparameter must be omitted.
The infrastructure that you run your service on. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.
\nThe FARGATE
launch type runs your tasks on Fargate On-Demand\n\t\t\tinfrastructure.
Fargate Spot infrastructure is available for use but a capacity provider\n\t\t\t\tstrategy must be used. For more information, see Fargate capacity providers in the Amazon ECS\n\t\t\t\t\tDeveloper Guide.
\nThe EC2
launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.
The EXTERNAL
launch type runs your tasks on your on-premises server or\n\t\t\tvirtual machine (VM) capacity registered to your cluster.
A service can use either a launch type or a capacity provider strategy. If a\n\t\t\t\tlaunchType
is specified, the capacityProviderStrategy
\n\t\t\tparameter must be omitted.
The platform version that your tasks in the service are running on. A platform version\n\t\t\tis specified only for tasks using the Fargate launch type. If one isn't\n\t\t\tspecified, the LATEST
platform version is used. For more information, see\n\t\t\t\tFargate platform versions in the Amazon Elastic Container Service Developer Guide.
The platform version that your tasks in the service are running on. A platform version\n\t\t\tis specified only for tasks using the Fargate launch type. If one isn't\n\t\t\tspecified, the LATEST
platform version is used. For more information, see\n\t\t\t\tFargate platform\n\t\t\t\tversions in the Amazon Elastic Container Service Developer Guide.
The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing target health checks after a task has first started. This is only used when your\n\t\t\tservice is configured to use a load balancer. If your service has a load balancer\n\t\t\tdefined and you don't specify a health check grace period value, the default value of\n\t\t\t\t0
is used.
If you do not use an Elastic Load Balancing, we recommend that you use the startPeriod
in\n\t\t\tthe task definition health check parameters. For more information, see Health\n\t\t\t\tcheck.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can\n\t\t\tspecify a health check grace period of up to 2,147,483,647 seconds (about 69 years).\n\t\t\tDuring that time, the Amazon ECS service scheduler ignores health check status. This grace\n\t\t\tperiod can prevent the service scheduler from marking tasks as unhealthy and stopping\n\t\t\tthem before they have time to come up.
" + "smithy.api#documentation": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing target health checks after a task has first started. This is only used when your\n\t\t\tservice is configured to use a load balancer. If your service has a load balancer\n\t\t\tdefined and you don't specify a health check grace period value, the default value of\n\t\t\t\t0
is used.
If you do not use an Elastic Load Balancing, we recommend that you use the startPeriod
in\n\t\t\tthe task definition health check parameters. For more information, see Health\n\t\t\t\tcheck.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you\n\t\t\tcan specify a health check grace period of up to 2,147,483,647 seconds (about 69 years).\n\t\t\tDuring that time, the Amazon ECS service scheduler ignores health check status. This grace\n\t\t\tperiod can prevent the service scheduler from marking tasks as unhealthy and stopping\n\t\t\tthem before they have time to come up.
" } }, "schedulingStrategy": { @@ -3341,7 +3374,7 @@ "propagateTags": { "target": "com.amazonaws.ecs#PropagateTags", "traits": { - "smithy.api#documentation": "Specifies whether to propagate the tags from the task definition to the task. If no\n\t\t\tvalue is specified, the tags aren't propagated. Tags can only be propagated to the task\n\t\t\tduring task creation. To add tags to a task after task creation, use the TagResource API action.
\nYou must set this to a value other than NONE
when you use Cost Explorer. For more information, see Amazon ECS usage reports in the Amazon Elastic Container Service Developer Guide.
The default is NONE
.
Specifies whether to propagate the tags from the task definition to the task. If no\n\t\t\tvalue is specified, the tags aren't propagated. Tags can only be propagated to the task\n\t\t\tduring task creation. To add tags to a task after task creation, use the TagResource API action.
\nYou must set this to a value other than NONE
when you use Cost Explorer.\n\t\t\tFor more information, see Amazon ECS usage reports\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
The default is NONE
.
Create a task set in the specified cluster and service. This is used when a service\n\t\t\tuses the EXTERNAL
deployment controller type. For more information, see\n\t\t\t\tAmazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
\nFor information about the maximum number of task sets and otther quotas, see Amazon ECS\n\t\t\tservice quotas in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Create a task set in the specified cluster and service. This is used when a service\n\t\t\tuses the EXTERNAL
deployment controller type. For more information, see\n\t\t\t\tAmazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
\nFor information about the maximum number of task sets and other quotas, see Amazon ECS\n\t\t\tservice quotas in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#CreateTaskSetRequest": { @@ -3555,29 +3588,29 @@ "smithy.api#documentation": "Disables an account setting for a specified user, role, or the root user for an\n\t\t\taccount.
", "smithy.api#examples": [ { - "title": "To delete your account setting", - "documentation": "This example deletes the account setting for your user for the specified resource type.", + "title": "To delete the account settings for a specific IAM user or IAM role", + "documentation": "This example deletes the account setting for a specific IAM user or IAM role for the specified resource type. Only the root user can view or modify the account settings for another user.", "input": { - "name": "serviceLongArnFormat" + "name": "containerInstanceLongArnFormat", + "principalArn": "arn:aws:iam::If a service is using the rolling update (ECS
) deployment type, the\n\t\t\t\tminimumHealthyPercent
represents a lower limit on the number of your\n\t\t\tservice's tasks that must remain in the RUNNING
state during a deployment,\n\t\t\tas a percentage of the desiredCount
(rounded up to the nearest integer).\n\t\t\tThis parameter enables you to deploy without using additional cluster capacity. For\n\t\t\texample, if your service has a desiredCount
of four tasks and a\n\t\t\t\tminimumHealthyPercent
of 50%, the service scheduler may stop two\n\t\t\texisting tasks to free up cluster capacity before starting two new tasks.
For services that do not use a load balancer, the following\n\t\t\tshould be noted:
\nA service is considered healthy if all essential containers within the tasks\n\t\t\t\t\tin the service pass their health checks.
\nIf a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for 40 seconds after a task reaches a RUNNING
\n\t\t\t\t\tstate before the task is counted towards the minimum healthy percent\n\t\t\t\t\ttotal.
If a task has one or more essential containers with a health check defined,\n\t\t\t\t\tthe service scheduler will wait for the task to reach a healthy status before\n\t\t\t\t\tcounting it towards the minimum healthy percent total. A task is considered\n\t\t\t\t\thealthy when all essential containers within the task have passed their health\n\t\t\t\t\tchecks. The amount of time the service scheduler can wait for is determined by\n\t\t\t\t\tthe container health check settings.
\nFor services that do use a load balancer, the following should be\n\t\t\tnoted:
\nIf a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for the load balancer target group health check to return a\n\t\t\t\t\thealthy status before counting the task towards the minimum healthy percent\n\t\t\t\t\ttotal.
\nIf a task has an essential container with a health check defined, the service\n\t\t\t\t\tscheduler will wait for both the task to reach a healthy status and the load\n\t\t\t\t\tbalancer target group health check to return a healthy status before counting\n\t\t\t\t\tthe task towards the minimum healthy percent total.
\nThe default value for a replica service for\n\t\t\tminimumHealthyPercent
is 100%. The default\n\t\t\tminimumHealthyPercent
value for a service using\n\t\t\tthe DAEMON
service schedule is 0% for the CLI,\n\t\t\tthe Amazon Web Services SDKs, and the APIs and 50% for the Amazon Web Services Management Console.
The minimum number of healthy tasks during a deployment is the\n\t\t\tdesiredCount
multiplied by the\n\t\t\tminimumHealthyPercent
/100, rounded up to the\n\t\t\tnearest integer value.
If a service is using either the blue/green (CODE_DEPLOY
) or\n\t\t\t\tEXTERNAL
deployment types and is running tasks that use the\n\t\t\tEC2 launch type, the minimum healthy\n\t\t\t\tpercent value is set to the default value and is used to define the lower\n\t\t\tlimit on the number of the tasks in the service that remain in the RUNNING
\n\t\t\tstate while the container instances are in the DRAINING
state. If a service\n\t\t\tis using either the blue/green (CODE_DEPLOY
) or EXTERNAL
\n\t\t\tdeployment types and is running tasks that use the Fargate launch type,\n\t\t\tthe minimum healthy percent value is not used, although it is returned when describing\n\t\t\tyour service.
If a service is using the rolling update (ECS
) deployment type, the\n\t\t\t\tminimumHealthyPercent
represents a lower limit on the number of your\n\t\t\tservice's tasks that must remain in the RUNNING
state during a deployment,\n\t\t\tas a percentage of the desiredCount
(rounded up to the nearest integer).\n\t\t\tThis parameter enables you to deploy without using additional cluster capacity. For\n\t\t\texample, if your service has a desiredCount
of four tasks and a\n\t\t\t\tminimumHealthyPercent
of 50%, the service scheduler may stop two\n\t\t\texisting tasks to free up cluster capacity before starting two new tasks.
For services that do not use a load balancer, the following\n\t\t\tshould be noted:
\nA service is considered healthy if all essential containers within the tasks\n\t\t\t\t\tin the service pass their health checks.
\nIf a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for 40 seconds after a task reaches a RUNNING
\n\t\t\t\t\tstate before the task is counted towards the minimum healthy percent\n\t\t\t\t\ttotal.
If a task has one or more essential containers with a health check defined,\n\t\t\t\t\tthe service scheduler will wait for the task to reach a healthy status before\n\t\t\t\t\tcounting it towards the minimum healthy percent total. A task is considered\n\t\t\t\t\thealthy when all essential containers within the task have passed their health\n\t\t\t\t\tchecks. The amount of time the service scheduler can wait for is determined by\n\t\t\t\t\tthe container health check settings.
\nFor services that do use a load balancer, the following should be\n\t\t\tnoted:
\nIf a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for the load balancer target group health check to return a\n\t\t\t\t\thealthy status before counting the task towards the minimum healthy percent\n\t\t\t\t\ttotal.
\nIf a task has an essential container with a health check defined, the service\n\t\t\t\t\tscheduler will wait for both the task to reach a healthy status and the load\n\t\t\t\t\tbalancer target group health check to return a healthy status before counting\n\t\t\t\t\tthe task towards the minimum healthy percent total.
\nThe default value for a replica service for minimumHealthyPercent
is\n\t\t\t100%. The default minimumHealthyPercent
value for a service using the\n\t\t\t\tDAEMON
service schedule is 0% for the CLI, the Amazon Web Services SDKs, and the\n\t\t\tAPIs and 50% for the Amazon Web Services Management Console.
The minimum number of healthy tasks during a deployment is the\n\t\t\t\tdesiredCount
multiplied by the minimumHealthyPercent
/100,\n\t\t\trounded up to the nearest integer value.
If a service is using either the blue/green (CODE_DEPLOY
) or\n\t\t\t\tEXTERNAL
deployment types and is running tasks that use the\n\t\t\tEC2 launch type, the minimum healthy\n\t\t\t\tpercent value is set to the default value and is used to define the lower\n\t\t\tlimit on the number of the tasks in the service that remain in the RUNNING
\n\t\t\tstate while the container instances are in the DRAINING
state. If a service\n\t\t\tis using either the blue/green (CODE_DEPLOY
) or EXTERNAL
\n\t\t\tdeployment types and is running tasks that use the Fargate launch type,\n\t\t\tthe minimum healthy percent value is not used, although it is returned when describing\n\t\t\tyour service.
Specify an Key Management Service key ID to encrypt the ephemeral storage for deployment.
" + "smithy.api#documentation": "Specify an Key Management Service key ID to encrypt the ephemeral storage for\n\t\t\tdeployment.
" } } }, @@ -5558,19 +5591,19 @@ "driver": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The Docker volume driver to use. The driver value must match the driver name provided\n\t\t\tby Docker because it is used for task placement. If the driver was installed using the\n\t\t\tDocker plugin CLI, use docker plugin ls
to retrieve the driver name from\n\t\t\tyour container instance. If the driver was installed using another method, use Docker\n\t\t\tplugin discovery to retrieve the driver name. For more information, see Docker\n\t\t\t\tplugin discovery. This parameter maps to Driver
in the\n\t\t\tCreate a volume section of the Docker Remote API and the\n\t\t\t\txxdriver
option to docker\n\t\t\t\tvolume create.
The Docker volume driver to use. The driver value must match the driver name provided\n\t\t\tby Docker because it is used for task placement. If the driver was installed using the\n\t\t\tDocker plugin CLI, use docker plugin ls
to retrieve the driver name from\n\t\t\tyour container instance. If the driver was installed using another method, use Docker\n\t\t\tplugin discovery to retrieve the driver name. This parameter maps to Driver
in the docker create-container command and the\n\t\t\t\txxdriver
option to docker\n\t\t\t\tvolume create.
A map of Docker driver-specific options passed through. This parameter maps to\n\t\t\t\tDriverOpts
in the Create a volume section of the\n\t\t\tDocker Remote API and the xxopt
option to docker\n\t\t\t\tvolume create.
A map of Docker driver-specific options passed through. This parameter maps to\n\t\t\t\tDriverOpts
in the docker create-volume command and the xxopt
option to docker\n\t\t\t\tvolume create.
Custom metadata to add to your Docker volume. This parameter maps to\n\t\t\t\tLabels
in the Create a volume section of the\n\t\t\tDocker Remote API and the xxlabel
option to docker\n\t\t\t\tvolume create.
Custom metadata to add to your Docker volume. This parameter maps to\n\t\t\t\tLabels
in the docker create-container command and the xxlabel
option to docker\n\t\t\t\tvolume create.
The file type to use. Environment files are objects in Amazon S3. The only supported value is\n\t\t\t\ts3
.
The file type to use. Environment files are objects in Amazon S3. The only supported value\n\t\t\tis s3
.
A list of files containing the environment variables to pass to a container. You can\n\t\t\tspecify up to ten environment files. The file must have a .env
file\n\t\t\textension. Each line in an environment file should contain an environment variable in\n\t\t\t\tVARIABLE=VALUE
format. Lines beginning with #
are treated\n\t\t\tas comments and are ignored.
If there are environment variables specified using the environment
\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Use a file to pass environment variables to a container in the Amazon Elastic Container Service Developer Guide.
Environment variable files are objects in Amazon S3 and all Amazon S3 security considerations apply.
\nYou must use the following platforms for the Fargate launch type:
\nLinux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
Consider the following when using the Fargate launch type:
\nThe file is handled like a native Docker env-file.
\nThere is no support for shell escape handling.
\nThe container entry point interperts the VARIABLE
values.
A list of files containing the environment variables to pass to a container. You can\n\t\t\tspecify up to ten environment files. The file must have a .env
file\n\t\t\textension. Each line in an environment file should contain an environment variable in\n\t\t\t\tVARIABLE=VALUE
format. Lines beginning with #
are treated\n\t\t\tas comments and are ignored.
If there are environment variables specified using the environment
\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Use a file to pass\n\t\t\t\tenvironment variables to a container in the Amazon Elastic Container Service Developer Guide.
Environment variable files are objects in Amazon S3 and all Amazon S3 security considerations\n\t\t\tapply.
\nYou must use the following platforms for the Fargate launch type:
\nLinux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
Consider the following when using the Fargate launch type:
\nThe file is handled like a native Docker env-file.
\nThere is no support for shell escape handling.
\nThe container entry point interprets the VARIABLE
values.
The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported\n\t\t\tvalue is 20
GiB and the maximum supported value is\n\t\t\t\t200
GiB.
The total amount, in GiB, of ephemeral storage to set for the task. The minimum\n\t\t\tsupported value is 20
GiB and the maximum supported value is\n\t\t\t\t200
GiB.
A string array representing the command that the container runs to determine if it is\n\t\t\thealthy. The string array must start with CMD
to run the command arguments\n\t\t\tdirectly, or CMD-SHELL
to run the command with the container's default\n\t\t\tshell.
When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list\n\t\t\tof commands in double quotes and brackets.
\n\n [ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]
\n
You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.
\n\n CMD-SHELL, curl -f http://localhost/ || exit 1
\n
An exit code of 0 indicates success, and non-zero exit code indicates failure. For\n\t\t\tmore information, see HealthCheck
in the Create a container\n\t\t\tsection of the Docker Remote API.
A string array representing the command that the container runs to determine if it is\n\t\t\thealthy. The string array must start with CMD
to run the command arguments\n\t\t\tdirectly, or CMD-SHELL
to run the command with the container's default\n\t\t\tshell.
When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list\n\t\t\tof commands in double quotes and brackets.
\n\n [ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]
\n
You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.
\n\n CMD-SHELL, curl -f http://localhost/ || exit 1
\n
An exit code of 0 indicates success, and non-zero exit code indicates failure. For\n\t\t\tmore information, see HealthCheck
in the docker create-container command
An object representing a container health check. Health check parameters that are\n\t\t\tspecified in a container definition override any Docker health checks that exist in the\n\t\t\tcontainer image (such as those specified in a parent image or from the image's\n\t\t\tDockerfile). This configuration maps to the HEALTHCHECK
parameter of docker run.
The Amazon ECS container agent only monitors and reports on the health checks specified\n\t\t\t\tin the task definition. Amazon ECS does not monitor Docker health checks that are\n\t\t\t\tembedded in a container image and not specified in the container definition. Health\n\t\t\t\tcheck parameters that are specified in a container definition override any Docker\n\t\t\t\thealth checks that exist in the container image.
\nYou can view the health status of both individual containers and a task with the\n\t\t\tDescribeTasks API operation or when viewing the task details in the console.
\nThe health check is designed to make sure that your containers survive agent restarts,\n\t\t\tupgrades, or temporary unavailability.
\nAmazon ECS performs health checks on containers with the default that launched the\n\t\t\tcontainer instance or the task.
\nThe following describes the possible healthStatus
values for a\n\t\t\tcontainer:
\n HEALTHY
-The container health check has passed\n\t\t\t\t\tsuccessfully.
\n UNHEALTHY
-The container health check has failed.
\n UNKNOWN
-The container health check is being evaluated,\n\t\t\t\t\tthere's no container health check defined, or Amazon ECS doesn't have the health\n\t\t\t\t\tstatus of the container.
The following describes the possible healthStatus
values based on the\n\t\t\tcontainer health checker status of essential containers in the task with the following\n\t\t\tpriority order (high to low):
\n UNHEALTHY
-One or more essential containers have failed\n\t\t\t\t\ttheir health check.
\n UNKNOWN
-Any essential container running within the task is\n\t\t\t\t\tin an UNKNOWN
state and no other essential containers have an\n\t\t\t\t\t\tUNHEALTHY
state.
\n HEALTHY
-All essential containers within the task have\n\t\t\t\t\tpassed their health checks.
Consider the following task health example with 2 containers.
\nIf Container1 is UNHEALTHY
and Container2 is\n\t\t\t\t\tUNKNOWN
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is\n\t\t\t\t\tHEALTHY
, the task health is UNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
,\n\t\t\t\t\tthe task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
,\n\t\t\t\t\tthe task health is HEALTHY
.
Consider the following task health example with 3 containers.
\nIf Container1 is UNHEALTHY
and Container2 is\n\t\t\t\t\tUNKNOWN
, and Container3 is UNKNOWN
, the task health is\n\t\t\t\t\t\tUNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is\n\t\t\t\t\tUNKNOWN
, and Container3 is HEALTHY
, the task health is\n\t\t\t\t\t\tUNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is\n\t\t\t\t\tHEALTHY
, and Container3 is HEALTHY
, the task health is\n\t\t\t\t\t\tUNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
,\n\t\t\t\t\tand Container3 is HEALTHY
, the task health is\n\t\t\t\t\tUNKNOWN
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
,\n\t\t\t\t\tand Container3 is UNKNOWN
, the task health is\n\t\t\t\t\tUNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
,\n\t\t\t\t\tand Container3 is HEALTHY
, the task health is\n\t\t\t\t\tHEALTHY
.
If a task is run manually, and not as part of a service, the task will continue its\n\t\t\tlifecycle regardless of its health status. For tasks that are part of a service, if the\n\t\t\ttask reports as unhealthy then the task will be stopped and the service scheduler will\n\t\t\treplace it.
\nThe following are notes about container health check support:
\nIf the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this won't\n\t\t\t\t\tcause a container to transition to an UNHEALTHY
status. This is by design,\n\t\t\t\t\tto ensure that containers remain running during agent restarts or temporary\n\t\t\t\t\tunavailability. The health check status is the \"last heard from\" response from the Amazon ECS\n\t\t\t\t\tagent, so if the container was considered HEALTHY
prior to the disconnect,\n\t\t\t\t\tthat status will remain until the agent reconnects and another health check occurs.\n\t\t\t\t\tThere are no assumptions made about the status of the container health checks.
Container health checks require version 1.17.0
or greater of the Amazon ECS\n\t\t\t\t\tcontainer agent. For more information, see Updating the\n\t\t\t\t\t\tAmazon ECS container agent.
Container health checks are supported for Fargate tasks if\n\t\t\t\t\tyou're using platform version 1.1.0
or greater. For more\n\t\t\t\t\tinformation, see Fargate\n\t\t\t\t\t\tplatform versions.
Container health checks aren't supported for tasks that are part of a service\n\t\t\t\t\tthat's configured to use a Classic Load Balancer.
\nAn object representing a container health check. Health check parameters that are\n\t\t\tspecified in a container definition override any Docker health checks that exist in the\n\t\t\tcontainer image (such as those specified in a parent image or from the image's\n\t\t\tDockerfile). This configuration maps to the HEALTHCHECK
parameter of docker run.
The Amazon ECS container agent only monitors and reports on the health checks specified\n\t\t\t\tin the task definition. Amazon ECS does not monitor Docker health checks that are\n\t\t\t\tembedded in a container image and not specified in the container definition. Health\n\t\t\t\tcheck parameters that are specified in a container definition override any Docker\n\t\t\t\thealth checks that exist in the container image.
\nYou can view the health status of both individual containers and a task with the\n\t\t\tDescribeTasks API operation or when viewing the task details in the console.
\nThe health check is designed to make sure that your containers survive agent restarts,\n\t\t\tupgrades, or temporary unavailability.
\nAmazon ECS performs health checks on containers with the default that launched the\n\t\t\tcontainer instance or the task.
\nThe following describes the possible healthStatus
values for a\n\t\t\tcontainer:
\n HEALTHY
-The container health check has passed\n\t\t\t\t\tsuccessfully.
\n UNHEALTHY
-The container health check has failed.
\n UNKNOWN
-The container health check is being evaluated,\n\t\t\t\t\tthere's no container health check defined, or Amazon ECS doesn't have the health\n\t\t\t\t\tstatus of the container.
The following describes the possible healthStatus
values based on the\n\t\t\tcontainer health checker status of essential containers in the task with the following\n\t\t\tpriority order (high to low):
\n UNHEALTHY
-One or more essential containers have failed\n\t\t\t\t\ttheir health check.
\n UNKNOWN
-Any essential container running within the task is\n\t\t\t\t\tin an UNKNOWN
state and no other essential containers have an\n\t\t\t\t\t\tUNHEALTHY
state.
\n HEALTHY
-All essential containers within the task have\n\t\t\t\t\tpassed their health checks.
Consider the following task health example with 2 containers.
\nIf Container1 is UNHEALTHY
and Container2 is\n\t\t\t\t\tUNKNOWN
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is\n\t\t\t\t\tHEALTHY
, the task health is UNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
,\n\t\t\t\t\tthe task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
,\n\t\t\t\t\tthe task health is HEALTHY
.
Consider the following task health example with 3 containers.
\nIf Container1 is UNHEALTHY
and Container2 is\n\t\t\t\t\tUNKNOWN
, and Container3 is UNKNOWN
, the task health is\n\t\t\t\t\t\tUNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is\n\t\t\t\t\tUNKNOWN
, and Container3 is HEALTHY
, the task health is\n\t\t\t\t\t\tUNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is\n\t\t\t\t\tHEALTHY
, and Container3 is HEALTHY
, the task health is\n\t\t\t\t\t\tUNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
,\n\t\t\t\t\tand Container3 is HEALTHY
, the task health is\n\t\t\t\t\tUNKNOWN
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
,\n\t\t\t\t\tand Container3 is UNKNOWN
, the task health is\n\t\t\t\t\tUNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
,\n\t\t\t\t\tand Container3 is HEALTHY
, the task health is\n\t\t\t\t\tHEALTHY
.
If a task is run manually, and not as part of a service, the task will continue its\n\t\t\tlifecycle regardless of its health status. For tasks that are part of a service, if the\n\t\t\ttask reports as unhealthy then the task will be stopped and the service scheduler will\n\t\t\treplace it.
\nThe following are notes about container health check support:
\nIf the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this\n\t\t\t\t\twon't cause a container to transition to an UNHEALTHY
status. This\n\t\t\t\t\tis by design, to ensure that containers remain running during agent restarts or\n\t\t\t\t\ttemporary unavailability. The health check status is the \"last heard from\"\n\t\t\t\t\tresponse from the Amazon ECS agent, so if the container was considered\n\t\t\t\t\t\tHEALTHY
prior to the disconnect, that status will remain until\n\t\t\t\t\tthe agent reconnects and another health check occurs. There are no assumptions\n\t\t\t\t\tmade about the status of the container health checks.
Container health checks require version 1.17.0
or greater of the\n\t\t\t\t\tAmazon ECS container agent. For more information, see Updating the\n\t\t\t\t\t\tAmazon ECS container agent.
Container health checks are supported for Fargate tasks if\n\t\t\t\t\tyou're using platform version 1.1.0
or greater. For more\n\t\t\t\t\tinformation, see Fargate\n\t\t\t\t\t\tplatform versions.
Container health checks aren't supported for tasks that are part of a service\n\t\t\t\t\tthat's configured to use a Classic Load Balancer.
\nThe Linux capabilities for the container that have been added to the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapAdd
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--cap-add
option to docker\n\t\t\t\trun.
Tasks launched on Fargate only support adding the SYS_PTRACE
kernel\n\t\t\t\tcapability.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"
\n
The Linux capabilities for the container that have been added to the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapAdd
in the docker create-container command and the\n\t\t\t\t--cap-add
option to docker\n\t\t\t\trun.
Tasks launched on Fargate only support adding the SYS_PTRACE
kernel\n\t\t\t\tcapability.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"
\n
The Linux capabilities for the container that have been removed from the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapDrop
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--cap-drop
option to docker\n\t\t\t\trun.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"
\n
The Linux capabilities for the container that have been removed from the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapDrop
in the docker create-container command and the\n\t\t\t\t--cap-drop
option to docker\n\t\t\t\trun.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"
\n
The Linux capabilities to add or remove from the default Docker configuration for a container defined in the task definition. For more information about the default capabilities\n\t\t\tand the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run\n\t\t\t\treference. For more detailed information about these Linux capabilities,\n\t\t\tsee the capabilities(7) Linux manual page.
" + "smithy.api#documentation": "The Linux capabilities to add or remove from the default Docker configuration for a container defined in the task definition. For more detailed information about these Linux capabilities,\n\t\t\tsee the capabilities(7) Linux manual page.
" } }, "com.amazonaws.ecs#KeyValuePair": { @@ -6618,7 +6657,7 @@ "devices": { "target": "com.amazonaws.ecs#DevicesList", "traits": { - "smithy.api#documentation": "Any host devices to expose to the container. This parameter maps to\n\t\t\t\tDevices
in the Create a container section of the\n\t\t\tDocker Remote API and the --device
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tdevices
parameter isn't supported.
Any host devices to expose to the container. This parameter maps to\n\t\t\tDevices
in the docker create-container command and the --device
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tdevices
parameter isn't supported.
The value for the size (in MiB) of the /dev/shm
volume. This parameter\n\t\t\tmaps to the --shm-size
option to docker\n\t\t\t\trun.
If you are using tasks that use the Fargate launch type, the\n\t\t\t\t\tsharedMemorySize
parameter is not supported.
The value for the size (in MiB) of the /dev/shm
volume. This parameter\n\t\t\tmaps to the --shm-size
option to docker\n\t\t\t\trun.
If you are using tasks that use the Fargate launch type, the\n\t\t\t\t\tsharedMemorySize
parameter is not supported.
The container path, mount options, and size (in MiB) of the tmpfs mount. This\n\t\t\tparameter maps to the --tmpfs
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\ttmpfs
parameter isn't supported.
The container path, mount options, and size (in MiB) of the tmpfs mount. This\n\t\t\tparameter maps to the --tmpfs
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\ttmpfs
parameter isn't supported.
This allows you to tune a container's memory swappiness behavior. A\n\t\t\t\tswappiness
value of 0
will cause swapping to not happen\n\t\t\tunless absolutely necessary. A swappiness
value of 100
will\n\t\t\tcause pages to be swapped very aggressively. Accepted values are whole numbers between\n\t\t\t\t0
and 100
. If the swappiness
parameter is not\n\t\t\tspecified, a default value of 60
is used. If a value is not specified for\n\t\t\t\tmaxSwap
then this parameter is ignored. This parameter maps to the\n\t\t\t\t--memory-swappiness
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tswappiness
parameter isn't supported.
If you're using tasks on Amazon Linux 2023 the swappiness
parameter isn't\n\t\t\t\tsupported.
This allows you to tune a container's memory swappiness behavior. A\n\t\t\t\tswappiness
value of 0
will cause swapping to not happen\n\t\t\tunless absolutely necessary. A swappiness
value of 100
will\n\t\t\tcause pages to be swapped very aggressively. Accepted values are whole numbers between\n\t\t\t\t0
and 100
. If the swappiness
parameter is not\n\t\t\tspecified, a default value of 60
is used. If a value is not specified for\n\t\t\t\tmaxSwap
then this parameter is ignored. This parameter maps to the\n\t\t\t\t--memory-swappiness
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tswappiness
parameter isn't supported.
If you're using tasks on Amazon Linux 2023 the swappiness
parameter isn't\n\t\t\t\tsupported.
Lists the account settings for a specified principal.
", "smithy.api#examples": [ { - "title": "To view your effective account settings", - "documentation": "This example displays the effective account settings for your account.", + "title": "To view the effective account settings for a specific IAM user or IAM role", + "documentation": "This example displays the effective account settings for the specified user or role.", "input": { - "effectiveSettings": true + "effectiveSettings": true, + "principalArn": "arn:aws:iam::Returns a list of task definition families that are registered to your account. This\n\t\t\tlist includes task definition families that no longer have any ACTIVE
task\n\t\t\tdefinition revisions.
You can filter out task definition families that don't contain any ACTIVE
\n\t\t\ttask definition revisions by setting the status
parameter to\n\t\t\t\tACTIVE
. You can also filter the results with the\n\t\t\t\tfamilyPrefix
parameter.
Returns a list of task definitions that are registered to your account. You can filter\n\t\t\tthe results by family name with the familyPrefix
parameter or by status\n\t\t\twith the status
parameter.
Returns a list of tasks. You can filter the results by cluster, task definition\n\t\t\tfamily, container instance, launch type, what IAM principal started the task, or by\n\t\t\tthe desired status of the task.
\nRecently stopped tasks might appear in the returned results.
", "smithy.api#examples": [ { - "title": "To list the tasks in a cluster", - "documentation": "This example lists all of the tasks in a cluster.", + "title": "To list the tasks on a particular container instance", + "documentation": "This example lists the tasks of a specified container instance. Specifying a ``containerInstance`` value limits the results to tasks that belong to that container instance.", "input": { - "cluster": "default" + "cluster": "default", + "containerInstance": "f6bbb147-5370-4ace-8c73-c7181ded911f" }, "output": { "taskArns": [ - "arn:aws:ecs:us-east-1:012345678910:task/default/0cc43cdb-3bee-4407-9c26-c0e6ea5bee84", - "arn:aws:ecs:us-east-1:012345678910:task/default/6b809ef6-c67e-4467-921f-ee261c15a0a1" + "arn:aws:ecs:us-east-1:012345678910:task/default/0cc43cdb-3bee-4407-9c26-c0e6ea5bee84" ] } }, { - "title": "To list the tasks on a particular container instance", - "documentation": "This example lists the tasks of a specified container instance. Specifying a ``containerInstance`` value limits the results to tasks that belong to that container instance.", + "title": "To list the tasks in a cluster", + "documentation": "This example lists all of the tasks in a cluster.", "input": { - "cluster": "default", - "containerInstance": "f6bbb147-5370-4ace-8c73-c7181ded911f" + "cluster": "default" }, "output": { "taskArns": [ - "arn:aws:ecs:us-east-1:012345678910:task/default/0cc43cdb-3bee-4407-9c26-c0e6ea5bee84" + "arn:aws:ecs:us-east-1:012345678910:task/default/0cc43cdb-3bee-4407-9c26-c0e6ea5bee84", + "arn:aws:ecs:us-east-1:012345678910:task/default/6b809ef6-c67e-4467-921f-ee261c15a0a1" ] } } @@ -7750,7 +7802,7 @@ "logDriver": { "target": "com.amazonaws.ecs#LogDriver", "traits": { - "smithy.api#documentation": "The log driver to use for the container.
\nFor tasks on Fargate, the supported log drivers are awslogs
,\n\t\t\t\tsplunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\tawslogs
, fluentd
, gelf
,\n\t\t\t\tjson-file
, journald
,\n\t\t\t\tlogentries
, syslog
, splunk
, and\n\t\t\t\tawsfirelens
.
For more information about using the awslogs
log driver, see Using\n\t\t\t\tthe awslogs log driver in the Amazon Elastic Container Service Developer Guide.
For more information about using the awsfirelens
log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide.
If you have a custom driver that isn't listed, you can fork the Amazon ECS container\n\t\t\t\tagent project that's available\n\t\t\t\t\ton GitHub and customize it to work with that driver. We encourage you to\n\t\t\t\tsubmit pull requests for changes that you would like to have included. However, we\n\t\t\t\tdon't currently provide support for running modified copies of this software.
\nThe log driver to use for the container.
\nFor tasks on Fargate, the supported log drivers are awslogs
,\n\t\t\t\tsplunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\tawslogs
, fluentd
, gelf
,\n\t\t\t\tjson-file
, journald
, syslog
,\n\t\t\t\tsplunk
, and awsfirelens
.
For more information about using the awslogs
log driver, see Send\n\t\t\t\tAmazon ECS logs to CloudWatch in the Amazon Elastic Container Service Developer Guide.
For more information about using the awsfirelens
log driver, see Send\n\t\t\t\tAmazon ECS logs to an Amazon Web Services service or Amazon Web Services Partner.
If you have a custom driver that isn't listed, you can fork the Amazon ECS container\n\t\t\t\tagent project that's available\n\t\t\t\t\ton GitHub and customize it to work with that driver. We encourage you to\n\t\t\t\tsubmit pull requests for changes that you would like to have included. However, we\n\t\t\t\tdon't currently provide support for running modified copies of this software.
\nThe log configuration for the container. This parameter maps to LogConfig
\n\t\t\tin the Create a container section of the Docker Remote API and the\n\t\t\t\t--log-driver
option to \n docker\n\t\t\t\t\trun
\n .
By default, containers use the same logging driver that the Docker daemon uses.\n\t\t\tHowever, the container might use a different logging driver than the Docker daemon by\n\t\t\tspecifying a log driver configuration in the container definition. For more information\n\t\t\tabout the options for different supported log drivers, see Configure logging\n\t\t\t\tdrivers in the Docker documentation.
\nUnderstand the following when specifying a log configuration for your\n\t\t\tcontainers.
\nAmazon ECS currently supports a subset of the logging drivers available to the\n\t\t\t\t\tDocker daemon. Additional log drivers may be available in future releases of the\n\t\t\t\t\tAmazon ECS container agent.
\nFor tasks on Fargate, the supported log drivers are awslogs
,\n\t\t\t\t\t\tsplunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\t\t\tawslogs
, fluentd
, gelf
,\n\t\t\t\t\t\tjson-file
, journald
,\n\t\t\t\t\t\tlogentries
, syslog
, splunk
, and\n\t\t\t\t\t\tawsfirelens
.
This parameter requires version 1.18 of the Docker Remote API or greater on\n\t\t\t\t\tyour container instance.
\nFor tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must\n\t\t\t\t\tregister the available logging drivers with the\n\t\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS
environment variable before\n\t\t\t\t\tcontainers placed on that instance can use these log configuration options. For\n\t\t\t\t\tmore information, see Amazon ECS container agent configuration in the\n\t\t\t\t\tAmazon Elastic Container Service Developer Guide.
For tasks that are on Fargate, because you don't have access to the\n\t\t\t\t\tunderlying infrastructure your tasks are hosted on, any additional software\n\t\t\t\t\tneeded must be installed outside of the task. For example, the Fluentd output\n\t\t\t\t\taggregators or a remote host running Logstash to send Gelf logs to.
\nThe log configuration for the container. This parameter maps to LogConfig
\n\t\t\tin the docker create-container command and the\n\t\t\t\t--log-driver
option to docker\n\t\t\t\t\trun.
By default, containers use the same logging driver that the Docker daemon uses.\n\t\t\tHowever, the container might use a different logging driver than the Docker daemon by\n\t\t\tspecifying a log driver configuration in the container definition.
\nUnderstand the following when specifying a log configuration for your\n\t\t\tcontainers.
\nAmazon ECS currently supports a subset of the logging drivers available to the\n\t\t\t\t\tDocker daemon. Additional log drivers may be available in future releases of the\n\t\t\t\t\tAmazon ECS container agent.
\nFor tasks on Fargate, the supported log drivers are awslogs
,\n\t\t\t\t\t\tsplunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\t\t\tawslogs
, fluentd
, gelf
,\n\t\t\t\t\t\tjson-file
, journald
, syslog
,\n\t\t\t\t\t\tsplunk
, and awsfirelens
.
This parameter requires version 1.18 of the Docker Remote API or greater on\n\t\t\t\t\tyour container instance.
\nFor tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must\n\t\t\t\t\tregister the available logging drivers with the\n\t\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS
environment variable before\n\t\t\t\t\tcontainers placed on that instance can use these log configuration options. For\n\t\t\t\t\tmore information, see Amazon ECS container agent configuration in the\n\t\t\t\t\tAmazon Elastic Container Service Developer Guide.
For tasks that are on Fargate, because you don't have access to the\n\t\t\t\t\tunderlying infrastructure your tasks are hosted on, any additional software\n\t\t\t\t\tneeded must be installed outside of the task. For example, the Fluentd output\n\t\t\t\t\taggregators or a remote host running Logstash to send Gelf logs to.
\nPort mappings allow containers to access ports on the host container instance to send\n\t\t\tor receive traffic. Port mappings are specified as part of the container\n\t\t\tdefinition.
\nIf you use containers in a task with the awsvpc
or host
\n\t\t\tnetwork mode, specify the exposed ports using containerPort
. The\n\t\t\t\thostPort
can be left blank or it must be the same value as the\n\t\t\t\tcontainerPort
.
Most fields of this parameter (containerPort
, hostPort
,\n\t\t\t\tprotocol
) maps to PortBindings
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--publish
option to \n docker\n\t\t\t\t\trun
\n . If the network mode of a task definition is set to\n\t\t\t\thost
, host ports must either be undefined or match the container port\n\t\t\tin the port mapping.
You can't expose the same container port for multiple protocols. If you attempt\n\t\t\t\tthis, an error is returned.
\nAfter a task reaches the RUNNING
status, manual and automatic host and\n\t\t\tcontainer port assignments are visible in the networkBindings
section of\n\t\t\t\tDescribeTasks API responses.
Port mappings allow containers to access ports on the host container instance to send\n\t\t\tor receive traffic. Port mappings are specified as part of the container\n\t\t\tdefinition.
\nIf you use containers in a task with the awsvpc
or host
\n\t\t\tnetwork mode, specify the exposed ports using containerPort
. The\n\t\t\t\thostPort
can be left blank or it must be the same value as the\n\t\t\t\tcontainerPort
.
Most fields of this parameter (containerPort
, hostPort
,\n\t\t\tprotocol
) maps to PortBindings
in the docker create-container command and the\n\t\t\t\t--publish
option to docker\n\t\t\t\t\trun
. If the network mode of a task definition is set to\n\t\t\t\thost
, host ports must either be undefined or match the container port\n\t\t\tin the port mapping.
You can't expose the same container port for multiple protocols. If you attempt\n\t\t\t\tthis, an error is returned.
\nAfter a task reaches the RUNNING
status, manual and automatic host and\n\t\t\tcontainer port assignments are visible in the networkBindings
section of\n\t\t\t\tDescribeTasks API responses.
Modifies an account setting. Account settings are set on a per-Region basis.
\nIf you change the root user account setting, the default settings are reset for users and\n\t\t\troles that do not have specified individual account settings. For more information, see\n\t\t\t\tAccount\n\t\t\t\tSettings in the Amazon Elastic Container Service Developer Guide.
", "smithy.api#examples": [ { - "title": "To modify your account settings", - "documentation": "This example modifies your account settings to opt in to the new ARN and resource ID format for Amazon ECS services. If you’re using this command as the root user, then changes apply to the entire AWS account, unless an IAM user or role explicitly overrides these settings for themselves.", + "title": "To modify the account settings for a specific IAM user or IAM role", + "documentation": "This example modifies the account setting for a specific IAM user or IAM role to opt in to the new ARN and resource ID format for Amazon ECS container instances. If you’re using this command as the root user, then changes apply to the entire AWS account, unless an IAM user or role explicitly overrides these settings for themselves.", "input": { - "name": "serviceLongArnFormat", - "value": "enabled" + "name": "containerInstanceLongArnFormat", + "value": "enabled", + "principalArn": "arn:aws:iam::Registers a new task definition from the supplied family
and\n\t\t\t\tcontainerDefinitions
. Optionally, you can add data volumes to your\n\t\t\tcontainers with the volumes
parameter. For more information about task\n\t\t\tdefinition parameters and defaults, see Amazon ECS Task\n\t\t\t\tDefinitions in the Amazon Elastic Container Service Developer Guide.
You can specify a role for your task with the taskRoleArn
parameter. When\n\t\t\tyou specify a role for a task, its containers can then use the latest versions of the\n\t\t\tCLI or SDKs to make API requests to the Amazon Web Services services that are specified in the\n\t\t\tpolicy that's associated with the role. For more information, see IAM\n\t\t\t\tRoles for Tasks in the Amazon Elastic Container Service Developer Guide.
You can specify a Docker networking mode for the containers in your task definition\n\t\t\twith the networkMode
parameter. The available network modes correspond to\n\t\t\tthose described in Network\n\t\t\t\tsettings in the Docker run reference. If you specify the awsvpc
\n\t\t\tnetwork mode, the task is allocated an elastic network interface, and you must specify a\n\t\t\t\tNetworkConfiguration when you create a service or run a task with\n\t\t\tthe task definition. For more information, see Task Networking\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
Registers a new task definition from the supplied family
and\n\t\t\t\tcontainerDefinitions
. Optionally, you can add data volumes to your\n\t\t\tcontainers with the volumes
parameter. For more information about task\n\t\t\tdefinition parameters and defaults, see Amazon ECS Task\n\t\t\t\tDefinitions in the Amazon Elastic Container Service Developer Guide.
You can specify a role for your task with the taskRoleArn
parameter. When\n\t\t\tyou specify a role for a task, its containers can then use the latest versions of the\n\t\t\tCLI or SDKs to make API requests to the Amazon Web Services services that are specified in the\n\t\t\tpolicy that's associated with the role. For more information, see IAM\n\t\t\t\tRoles for Tasks in the Amazon Elastic Container Service Developer Guide.
You can specify a Docker networking mode for the containers in your task definition\n\t\t\twith the networkMode
parameter. If you specify the awsvpc
\n\t\t\tnetwork mode, the task is allocated an elastic network interface, and you must specify a\n\t\t\t\tNetworkConfiguration when you create a service or run a task with\n\t\t\tthe task definition. For more information, see Task Networking\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent\n permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required\n depending on the requirements of your task. For more information, see Amazon ECS task\n execution IAM role in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent\n permission to make Amazon Web Services API calls on your behalf. For informationabout the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
" } }, "networkMode": { "target": "com.amazonaws.ecs#NetworkMode", "traits": { - "smithy.api#documentation": "The Docker networking mode to use for the containers in the task. The valid values are\n none
, bridge
, awsvpc
, and host
.\n If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances,
or awsvpc
can be used. If the network\n mode is set to none
, you cannot specify port mappings in your container\n definitions, and the tasks containers do not have external connectivity. The\n host
and awsvpc
network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host
\n network mode) or the attached elastic network interface port (for the\n awsvpc
network mode), so you cannot take advantage of dynamic host port\n mappings.
When using the host
network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.
For more information, see Network\n settings in the Docker run reference.
" + "smithy.api#documentation": "The Docker networking mode to use for the containers in the task. The valid values are\n none
, bridge
, awsvpc
, and host
.\n If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances,
or awsvpc
can be used. If the network\n mode is set to none
, you cannot specify port mappings in your container\n definitions, and the tasks containers do not have external connectivity. The\n host
and awsvpc
network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host
\n network mode) or the attached elastic network interface port (for the\n awsvpc
network mode), so you cannot take advantage of dynamic host port\n mappings.
When using the host
network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.
The process namespace to use for the containers in the task. The valid\n values are host
or task
. On Fargate for\n Linux containers, the only valid value is task
. For\n example, monitoring sidecars might need pidMode
to access\n information about other containers running in the same task.
If host
is specified, all containers within the tasks\n that specified the host
PID mode on the same container\n instance share the same process namespace with the host Amazon EC2\n instance.
If task
is specified, all containers within the specified\n task share the same process namespace.
If no value is specified, the\n default is a private namespace for each container. For more information,\n see PID settings in the Docker run\n reference.
\nIf the host
PID mode is used, there's a heightened risk\n of undesired process namespace exposure. For more information, see\n Docker security.
This parameter is not supported for Windows containers.
\nThis parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0
or later\n (Linux). This isn't supported for Windows containers on\n Fargate.
The process namespace to use for the containers in the task. The valid\n values are host
or task
. On Fargate for\n Linux containers, the only valid value is task
. For\n example, monitoring sidecars might need pidMode
to access\n information about other containers running in the same task.
If host
is specified, all containers within the tasks\n that specified the host
PID mode on the same container\n instance share the same process namespace with the host Amazon EC2\n instance.
If task
is specified, all containers within the specified\n task share the same process namespace.
If no value is specified, the\n default is a private namespace for each container.
\nIf the host
PID mode is used, there's a heightened risk\n of undesired process namespace exposure.
This parameter is not supported for Windows containers.
\nThis parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0
or later\n (Linux). This isn't supported for Windows containers on\n Fargate.
The IPC resource namespace to use for the containers in the task. The valid values are\n host
, task
, or none
. If host
is\n specified, then all containers within the tasks that specified the host
IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task
is specified, all containers within the specified task\n share the same IPC resources. If none
is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance. For\n more information, see IPC\n settings in the Docker run reference.
If the host
IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace expose. For more information, see Docker\n security.
If you are setting namespaced kernel parameters using systemControls
for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related\n systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related\n systemControls
will apply to all containers within a\n task.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe IPC resource namespace to use for the containers in the task. The valid values are\n host
, task
, or none
. If host
is\n specified, then all containers within the tasks that specified the host
IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task
is specified, all containers within the specified task\n share the same IPC resources. If none
is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance.
If the host
IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace expose.
If you are setting namespaced kernel parameters using systemControls
for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related\n systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related\n systemControls
will apply to all containers within a\n task.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe value for the specified resource type.
\nWhen the type is GPU
, the value is the number of physical GPUs
the\n\t\t\tAmazon ECS container agent reserves for the container. The number of GPUs that's reserved for\n\t\t\tall containers in a task can't exceed the number of available GPUs on the container\n\t\t\tinstance that the task is launched on.
When the type is InferenceAccelerator
, the value
matches\n\t\t\tthe deviceName
for an InferenceAccelerator specified in a task definition.
The value for the specified resource type.
\nWhen the type is GPU
, the value is the number of physical\n\t\t\t\tGPUs
the Amazon ECS container agent reserves for the container. The number\n\t\t\tof GPUs that's reserved for all containers in a task can't exceed the number of\n\t\t\tavailable GPUs on the container instance that the task is launched on.
When the type is InferenceAccelerator
, the value
matches the\n\t\t\t\tdeviceName
for an InferenceAccelerator specified in a task definition.
An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy
parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call\n\t\t\twith the startedBy
value. Up to 128 letters (uppercase and lowercase),\n\t\t\tnumbers, hyphens (-), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter\n\t\t\tcontains the deployment ID of the service that starts it.
An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy
parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call with\n\t\t\tthe startedBy
value. Up to 128 letters (uppercase and lowercase), numbers,\n\t\t\thyphens (-), forward slash (/), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter\n\t\t\tcontains the deployment ID of the service that starts it.
The family
and revision
(family:revision
) or\n\t\t\tfull ARN of the task definition to run. If a revision
isn't specified,\n\t\t\tthe latest ACTIVE
revision is used.
The full ARN value must match the value that you specified as the\n\t\t\t\tResource
of the principal's permissions policy.
When you specify a task definition, you must either specify a specific revision, or\n\t\t\tall revisions in the ARN.
\nTo specify a specific revision, include the revision number in the ARN. For example,\n\t\t\tto specify revision 2, use\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2
.
To specify all revisions, use the wildcard (*) in the ARN. For example, to specify all\n\t\t\trevisions, use\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*
.
For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
", + "smithy.api#documentation": "The family
and revision
(family:revision
) or\n\t\t\tfull ARN of the task definition to run. If a revision
isn't specified,\n\t\t\tthe latest ACTIVE
revision is used.
The full ARN value must match the value that you specified as the\n\t\t\t\tResource
of the principal's permissions policy.
When you specify a task definition, you must either specify a specific revision, or\n\t\t\tall revisions in the ARN.
\nTo specify a specific revision, include the revision number in the ARN. For example,\n\t\t\tto specify revision 2, use\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2
.
To specify all revisions, use the wildcard (*) in the ARN. For example, to specify\n\t\t\tall revisions, use\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*
.
For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
", "smithy.api#required": {} } }, @@ -9637,7 +9689,7 @@ "tasks": { "target": "com.amazonaws.ecs#Tasks", "traits": { - "smithy.api#documentation": "A full description of the tasks that were run. The tasks that were successfully placed\n\t\t\ton your cluster are described here.
\n " + "smithy.api#documentation": "A full description of the tasks that were run. The tasks that were successfully placed\n\t\t\ton your cluster are described here.
" } }, "failures": { @@ -10646,7 +10698,7 @@ "startedBy": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy
parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call\n\t\t\twith the startedBy
value. Up to 36 letters (uppercase and lowercase),\n\t\t\tnumbers, hyphens (-), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, the startedBy
parameter\n\t\t\tcontains the deployment ID of the service that starts it.
An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy
parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call with\n\t\t\tthe startedBy
value. Up to 36 letters (uppercase and lowercase), numbers,\n\t\t\thyphens (-), forward slash (/), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, the startedBy
parameter\n\t\t\tcontains the deployment ID of the service that starts it.
Stops a running task. Any tags associated with the task will be deleted.
\nWhen StopTask is called on a task, the equivalent of docker\n\t\t\t\tstop
is issued to the containers running in the task. This results in a\n\t\t\t\tSIGTERM
value and a default 30-second timeout, after which the\n\t\t\t\tSIGKILL
value is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM
value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL
value is sent.
For Windows containers, POSIX signals do not work and runtime stops the container by sending\n\t\t\ta CTRL_SHUTDOWN_EVENT
. For more information, see Unable to react to graceful shutdown\n\t\t\t\tof (Windows) container #25982 on GitHub.
The default 30-second timeout can be configured on the Amazon ECS container agent with\n\t\t\t\tthe ECS_CONTAINER_STOP_TIMEOUT
variable. For more information, see\n\t\t\t\t\tAmazon ECS Container Agent Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
Stops a running task. Any tags associated with the task will be deleted.
\nWhen StopTask is called on a task, the equivalent of docker\n\t\t\t\tstop
is issued to the containers running in the task. This results in a\n\t\t\t\tSIGTERM
value and a default 30-second timeout, after which the\n\t\t\t\tSIGKILL
value is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM
value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL
value is sent.
For Windows containers, POSIX signals do not work and runtime stops the container by\n\t\t\tsending a CTRL_SHUTDOWN_EVENT
. For more information, see Unable to react to graceful shutdown\n\t\t\t\tof (Windows) container #25982 on GitHub.
The default 30-second timeout can be configured on the Amazon ECS container agent with\n\t\t\t\tthe ECS_CONTAINER_STOP_TIMEOUT
variable. For more information, see\n\t\t\t\t\tAmazon ECS Container Agent Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls
in the Create a container section of the\n\t\t\tDocker Remote API and the --sysctl
option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time
setting to maintain longer lived\n\t\t\tconnections.
We don't recommend that you specify network-related systemControls
\n\t\t\tparameters for multiple containers in a single task that also uses either the\n\t\t\t\tawsvpc
or host
network mode. Doing this has the following\n\t\t\tdisadvantages:
For tasks that use the awsvpc
network mode including Fargate,\n\t\t\t\t\tif you set systemControls
for any container, it applies to all\n\t\t\t\t\tcontainers in the task. If you set different systemControls
for\n\t\t\t\t\tmultiple containers in a single task, the container that's started last\n\t\t\t\t\tdetermines which systemControls
take effect.
For tasks that use the host
network mode, the network namespace\n\t\t\t\t\t\tsystemControls
aren't supported.
If you're setting an IPC resource namespace to use for the containers in the task, the\n\t\t\tfollowing conditions apply to your system controls. For more information, see IPC mode.
\nFor tasks that use the host
IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls
aren't supported.
For tasks that use the task
IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls
values apply to all containers within a\n\t\t\t\t\ttask.
This parameter is not supported for Windows containers.
\nThis parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0
or later\n (Linux). This isn't supported for Windows containers on\n Fargate.
A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\tSysctls
in the docker create-container command and the --sysctl
option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time
setting to maintain longer lived\n\t\t\tconnections.
We don't recommend that you specify network-related systemControls
\n\t\t\tparameters for multiple containers in a single task that also uses either the\n\t\t\t\tawsvpc
or host
network mode. Doing this has the following\n\t\t\tdisadvantages:
For tasks that use the awsvpc
network mode including Fargate,\n\t\t\t\t\tif you set systemControls
for any container, it applies to all\n\t\t\t\t\tcontainers in the task. If you set different systemControls
for\n\t\t\t\t\tmultiple containers in a single task, the container that's started last\n\t\t\t\t\tdetermines which systemControls
take effect.
For tasks that use the host
network mode, the network namespace\n\t\t\t\t\t\tsystemControls
aren't supported.
If you're setting an IPC resource namespace to use for the containers in the task, the\n\t\t\tfollowing conditions apply to your system controls. For more information, see IPC mode.
\nFor tasks that use the host
IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls
aren't supported.
For tasks that use the task
IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls
values apply to all containers within a\n\t\t\t\t\ttask.
This parameter is not supported for Windows containers.
\nThis parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0
or later\n (Linux). This isn't supported for Windows containers on\n Fargate.
The specified target wasn't found. You can view your available container instances\n\t\t\twith ListContainerInstances. Amazon ECS container instances are\n\t\t\tcluster-specific and Region-specific.
", + "smithy.api#documentation": "The specified target wasn't found. You can view your available container instances\n\t\t\twith ListContainerInstances. Amazon ECS container instances are cluster-specific and\n\t\t\tRegion-specific.
", "smithy.api#error": "client" } }, @@ -11501,19 +11553,19 @@ "taskRoleArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the\n\t\t\ttask permission to call Amazon Web Services APIs on your behalf. For more information, see Amazon ECS\n\t\t\t\tTask Role in the Amazon Elastic Container Service Developer Guide.
\nIAM roles for tasks on Windows require that the -EnableTaskIAMRole
\n\t\t\toption is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some\n\t\t\tconfiguration code to use the feature. For more information, see Windows IAM roles\n\t\t\t\tfor tasks in the Amazon Elastic Container Service Developer Guide.
The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the\n\t\t\ttask permission to call Amazon Web Services APIs on your behalf. For informationabout the required\n\t\t\tIAM roles for Amazon ECS, see IAM\n\t\t\t\troles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
" } }, "executionRoleArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent\n permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required\n depending on the requirements of your task. For more information, see Amazon ECS task\n execution IAM role in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent\n permission to make Amazon Web Services API calls on your behalf. For informationabout the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
" } }, "networkMode": { "target": "com.amazonaws.ecs#NetworkMode", "traits": { - "smithy.api#documentation": "The Docker networking mode to use for the containers in the task. The valid values are\n none
, bridge
, awsvpc
, and host
.\n If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances,
or awsvpc
can be used. If the network\n mode is set to none
, you cannot specify port mappings in your container\n definitions, and the tasks containers do not have external connectivity. The\n host
and awsvpc
network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host
\n network mode) or the attached elastic network interface port (for the\n awsvpc
network mode), so you cannot take advantage of dynamic host port\n mappings.
When using the host
network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.
For more information, see Network\n settings in the Docker run reference.
" + "smithy.api#documentation": "The Docker networking mode to use for the containers in the task. The valid values are\n none
, bridge
, awsvpc
, and host
.\n If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances,
or awsvpc
can be used. If the network\n mode is set to none
, you cannot specify port mappings in your container\n definitions, and the task's containers do not have external connectivity. The\n host
and awsvpc
network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host
\n network mode) or the attached elastic network interface port (for the\n awsvpc
network mode), so you cannot take advantage of dynamic host port\n mappings.
When using the host
network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.
The number of cpu
units used by the task. If you use the EC2 launch type,\n\t\t\tthis field is optional. Any value can be used. If you use the Fargate launch type, this\n\t\t\tfield is required. You must use one of the following values. The value that you choose\n\t\t\tdetermines your range of valid values for the memory
parameter.
The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.
\n256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
8192 (8 vCPU) - Available memory
values: 16 GB and 60 GB in 4 GB increments
This option requires Linux platform 1.4.0
or\n later.
16384 (16vCPU) - Available memory
values: 32GB and 120 GB in 8 GB increments
This option requires Linux platform 1.4.0
or\n later.
The number of cpu
units used by the task. If you use the EC2 launch type,\n\t\t\tthis field is optional. Any value can be used. If you use the Fargate launch type, this\n\t\t\tfield is required. You must use one of the following values. The value that you choose\n\t\t\tdetermines your range of valid values for the memory
parameter.
If you use the EC2 launch type, this field is optional. Supported values\n\t\t\tare between 128
CPU units (0.125
vCPUs) and 10240
\n\t\t\tCPU units (10
vCPUs).
The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.
\n256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
8192 (8 vCPU) - Available memory
values: 16 GB and 60 GB in 4 GB increments
This option requires Linux platform 1.4.0
or\n later.
16384 (16 vCPU) - Available memory
values: 32 GB and 120 GB in 8 GB increments
This option requires Linux platform 1.4.0
or\n later.
The process namespace to use for the containers in the task. The valid\n values are host
or task
. On Fargate for\n Linux containers, the only valid value is task
. For\n example, monitoring sidecars might need pidMode
to access\n information about other containers running in the same task.
If host
is specified, all containers within the tasks\n that specified the host
PID mode on the same container\n instance share the same process namespace with the host Amazon EC2\n instance.
If task
is specified, all containers within the specified\n task share the same process namespace.
If no value is specified, the\n default is a private namespace for each container. For more information,\n see PID settings in the Docker run\n reference.
\nIf the host
PID mode is used, there's a heightened risk\n of undesired process namespace exposure. For more information, see\n Docker security.
This parameter is not supported for Windows containers.
\nThis parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0
or later\n (Linux). This isn't supported for Windows containers on\n Fargate.
The process namespace to use for the containers in the task. The valid\n values are host
or task
. On Fargate for\n Linux containers, the only valid value is task
. For\n example, monitoring sidecars might need pidMode
to access\n information about other containers running in the same task.
If host
is specified, all containers within the tasks\n that specified the host
PID mode on the same container\n instance share the same process namespace with the host Amazon EC2\n instance.
If task
is specified, all containers within the specified\n task share the same process namespace.
If no value is specified, the\n default is a private namespace for each container.
\nIf the host
PID mode is used, there's a heightened risk\n of undesired process namespace exposure.
This parameter is not supported for Windows containers.
\nThis parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0
or later\n (Linux). This isn't supported for Windows containers on\n Fargate.
The IPC resource namespace to use for the containers in the task. The valid values are\n host
, task
, or none
. If host
is\n specified, then all containers within the tasks that specified the host
IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task
is specified, all containers within the specified task\n share the same IPC resources. If none
is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance. For\n more information, see IPC\n settings in the Docker run reference.
If the host
IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace expose. For more information, see Docker\n security.
If you are setting namespaced kernel parameters using systemControls
for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related\n systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related\n systemControls
will apply to all containers within a\n task.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe IPC resource namespace to use for the containers in the task. The valid values are\n host
, task
, or none
. If host
is\n specified, then all containers within the tasks that specified the host
IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task
is specified, all containers within the specified task\n share the same IPC resources. If none
is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance.
If the host
IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace expose.
If you are setting namespaced kernel parameters using systemControls
for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related\n systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related\n systemControls
will apply to all containers within a\n task.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe total amount, in GiB, of the ephemeral storage to set for the task. The minimum \t\t\n\t\t\tsupported value is 20
GiB and the maximum supported value is\u2028 200
\n\t\t\tGiB.
The total amount, in GiB, of the ephemeral storage to set for the task. The minimum\n\t\t\tsupported value is 20
GiB and the maximum supported value is\u2028\n\t\t\t\t200
GiB.
Specify an Key Management Service key ID to encrypt the ephemeral storage for the task.
" + "smithy.api#documentation": "Specify an Key Management Service key ID to encrypt the ephemeral storage for the\n\t\t\ttask.
" } } }, @@ -12313,7 +12365,7 @@ } }, "traits": { - "smithy.api#documentation": "The ulimit
settings to pass to the container.
Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile
resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile
resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile
soft limit is 1024
and the default hard limit\n\t\t\t\t\t\t\tis 65535
.
You can specify the ulimit
settings for a container in a task\n\t\t\tdefinition.
The ulimit
settings to pass to the container.
Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile
resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile
resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile
soft limit is 65535
and the default hard limit\n\t\t\t\t\t\t\tis 65535
.
You can specify the ulimit
settings for a container in a task\n\t\t\tdefinition.
Modifies the status of an Amazon ECS container instance.
\nOnce a container instance has reached an ACTIVE
state, you can change the\n\t\t\tstatus of a container instance to DRAINING
to manually remove an instance\n\t\t\tfrom a cluster, for example to perform system updates, update the Docker daemon, or\n\t\t\tscale down the cluster size.
A container instance can't be changed to DRAINING
until it has\n\t\t\t\treached an ACTIVE
status. If the instance is in any other status, an\n\t\t\t\terror will be received.
When you set a container instance to DRAINING
, Amazon ECS prevents new tasks\n\t\t\tfrom being scheduled for placement on the container instance and replacement service\n\t\t\ttasks are started on other container instances in the cluster if the resources are\n\t\t\tavailable. Service tasks on the container instance that are in the PENDING
\n\t\t\tstate are stopped immediately.
Service tasks on the container instance that are in the RUNNING
state are\n\t\t\tstopped and replaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent
and maximumPercent
. You can change\n\t\t\tthe deployment configuration of your service using UpdateService.
If minimumHealthyPercent
is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount
temporarily during task replacement. For example,\n\t\t\t\t\t\tdesiredCount
is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. If the\n\t\t\t\t\tminimum is 100%, the service scheduler can't remove existing tasks until the\n\t\t\t\t\treplacement tasks are considered healthy. Tasks for services that do not use a\n\t\t\t\t\tload balancer are considered healthy if they're in the RUNNING
\n\t\t\t\t\tstate. Tasks for services that use a load balancer are considered healthy if\n\t\t\t\t\tthey're in the RUNNING
state and are reported as healthy by the\n\t\t\t\t\tload balancer.
The maximumPercent
parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during task replacement. You can use this to define the\n\t\t\t\t\treplacement batch size. For example, if desiredCount
is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four tasks to be\n\t\t\t\t\tdrained, provided that the cluster resources required to do this are available.\n\t\t\t\t\tIf the maximum is 100%, then replacement tasks can't start until the draining\n\t\t\t\t\ttasks have stopped.
Any PENDING
or RUNNING
tasks that do not belong to a service\n\t\t\taren't affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING
\n\t\t\ttasks. You can verify this using ListTasks.
When a container instance has been drained, you can set a container instance to\n\t\t\t\tACTIVE
status and once it has reached that status the Amazon ECS scheduler\n\t\t\tcan begin scheduling tasks on the instance again.
Modifies the status of an Amazon ECS container instance.
\nOnce a container instance has reached an ACTIVE
state, you can change the\n\t\t\tstatus of a container instance to DRAINING
to manually remove an instance\n\t\t\tfrom a cluster, for example to perform system updates, update the Docker daemon, or\n\t\t\tscale down the cluster size.
A container instance can't be changed to DRAINING
until it has\n\t\t\t\treached an ACTIVE
status. If the instance is in any other status, an\n\t\t\t\terror will be received.
When you set a container instance to DRAINING
, Amazon ECS prevents new tasks\n\t\t\tfrom being scheduled for placement on the container instance and replacement service\n\t\t\ttasks are started on other container instances in the cluster if the resources are\n\t\t\tavailable. Service tasks on the container instance that are in the PENDING
\n\t\t\tstate are stopped immediately.
Service tasks on the container instance that are in the RUNNING
state are\n\t\t\tstopped and replaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent
and maximumPercent
. You can change\n\t\t\tthe deployment configuration of your service using UpdateService.
If minimumHealthyPercent
is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount
temporarily during task replacement. For example,\n\t\t\t\t\t\tdesiredCount
is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. If the\n\t\t\t\t\tminimum is 100%, the service scheduler can't remove existing tasks until the\n\t\t\t\t\treplacement tasks are considered healthy. Tasks for services that do not use a\n\t\t\t\t\tload balancer are considered healthy if they're in the RUNNING
\n\t\t\t\t\tstate. Tasks for services that use a load balancer are considered healthy if\n\t\t\t\t\tthey're in the RUNNING
state and are reported as healthy by the\n\t\t\t\t\tload balancer.
The maximumPercent
parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during task replacement. You can use this to define the\n\t\t\t\t\treplacement batch size. For example, if desiredCount
is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four tasks to be\n\t\t\t\t\tdrained, provided that the cluster resources required to do this are available.\n\t\t\t\t\tIf the maximum is 100%, then replacement tasks can't start until the draining\n\t\t\t\t\ttasks have stopped.
Any PENDING
or RUNNING
tasks that do not belong to a service\n\t\t\taren't affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING
\n\t\t\ttasks. You can verify this using ListTasks.
When a container instance has been drained, you can set a container instance to\n\t\t\t\tACTIVE
status and once it has reached that status the Amazon ECS scheduler\n\t\t\tcan begin scheduling tasks on the instance again.
Modifies the parameters of a service.
\nOn March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
\nFor services using the rolling update (ECS
) you can update the desired\n\t\t\tcount, deployment configuration, network configuration, load balancers, service\n\t\t\tregistries, enable ECS managed tags option, propagate tags option, task placement\n\t\t\tconstraints and strategies, and task definition. When you update any of these\n\t\t\tparameters, Amazon ECS starts new tasks with the new configuration.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or\n\t\t\trunning a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update\n\t\t\tyour volume configurations and trigger a new deployment.\n\t\t\t\tvolumeConfigurations
is only supported for REPLICA service and not\n\t\t\tDAEMON service. If you leave volumeConfigurations
\n null
, it doesn't trigger a new deployment. For more information on volumes,\n\t\t\tsee Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
For services using the blue/green (CODE_DEPLOY
) deployment controller,\n\t\t\tonly the desired count, deployment configuration, health check grace period, task\n\t\t\tplacement constraints and strategies, enable ECS managed tags option, and propagate tags\n\t\t\tcan be updated using this API. If the network configuration, platform version, task\n\t\t\tdefinition, or load balancer need to be updated, create a new CodeDeploy deployment. For more\n\t\t\tinformation, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired\n\t\t\tcount, task placement constraints and strategies, health check grace period, enable ECS\n\t\t\tmanaged tags option, and propagate tags option, using this API. If the launch type, load\n\t\t\tbalancer, network configuration, platform version, or task definition need to be\n\t\t\tupdated, create a new task set. For more information, see CreateTaskSet.
\nYou can add to or subtract from the number of instantiations of a task definition in a\n\t\t\tservice by specifying the cluster that the service is running in and a new\n\t\t\t\tdesiredCount
parameter.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or\n\t\t\trunning a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
\nIf you have updated the container image of your application, you can create a new task\n\t\t\tdefinition with that image and deploy it to your service. The service scheduler uses the\n\t\t\tminimum healthy percent and maximum percent parameters (in the service's deployment\n\t\t\tconfiguration) to determine the deployment strategy.
\nIf your updated Docker image uses the same tag as what is in the existing task\n\t\t\t\tdefinition for your service (for example, my_image:latest
), you don't\n\t\t\t\tneed to create a new revision of your task definition. You can update the service\n\t\t\t\tusing the forceNewDeployment
option. The new tasks launched by the\n\t\t\t\tdeployment pull the current image/tag combination from your repository when they\n\t\t\t\tstart.
You can also update the deployment configuration of a service. When a deployment is\n\t\t\ttriggered by updating the task definition of a service, the service scheduler uses the\n\t\t\tdeployment configuration parameters, minimumHealthyPercent
and\n\t\t\t\tmaximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount
temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount
is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. Tasks for\n\t\t\t\t\tservices that don't use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING
state. Tasks for services that use a load balancer are\n\t\t\t\t\tconsidered healthy if they're in the RUNNING
state and are reported\n\t\t\t\t\tas healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during a deployment. You can use it to define the\n\t\t\t\t\tdeployment batch size. For example, if desiredCount
is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four older tasks\n\t\t\t\t\t(provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent\n\t\t\tof docker stop
is issued to the containers running in the task. This\n\t\t\tresults in a SIGTERM
and a 30-second timeout. After this,\n\t\t\t\tSIGKILL
is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM
gracefully and exits within 30 seconds from\n\t\t\treceiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster with the following logic.
\nDetermine which of the container instances in your cluster can support your\n\t\t\t\t\tservice's task definition. For example, they have the required CPU, memory,\n\t\t\t\t\tports, and container instance attributes.
\nBy default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner even though you can choose a different\n\t\t\t\t\tplacement strategy.
\nSort the valid container instances by the fewest number of running\n\t\t\t\t\t\t\ttasks for this service in the same Availability Zone as the instance.\n\t\t\t\t\t\t\tFor example, if zone A has one running service task and zones B and C\n\t\t\t\t\t\t\teach have zero, valid container instances in either zone B or C are\n\t\t\t\t\t\t\tconsidered optimal for placement.
\nPlace the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone (based on the previous steps), favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.
\nWhen the service scheduler stops running tasks, it attempts to maintain balance across\n\t\t\tthe Availability Zones in your cluster using the following logic:
\nSort the container instances by the largest number of running tasks for this\n\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A\n\t\t\t\t\thas one running service task and zones B and C each have two, container\n\t\t\t\t\tinstances in either zone B or C are considered optimal for termination.
\nStop the task on a container instance in an optimal Availability Zone (based\n\t\t\t\t\ton the previous steps), favoring container instances with the largest number of\n\t\t\t\t\trunning tasks for this service.
\nYou must have a service-linked role when you update any of the following service\n\t\t\t\tproperties:
\n\n loadBalancers
,
\n serviceRegistries
\n
For more information about the role see the CreateService
request\n\t\t\t\tparameter \n role
\n .
Updates the protection status of a task. You can set protectionEnabled
to\n\t\t\t\ttrue
to protect your task from termination during scale-in events from\n\t\t\t\tService\n\t\t\t\tAutoscaling or deployments.
Task-protection, by default, expires after 2 hours at which point Amazon ECS clears the\n\t\t\t\tprotectionEnabled
property making the task eligible for termination by\n\t\t\ta subsequent scale-in event.
You can specify a custom expiration period for task protection from 1 minute to up to\n\t\t\t2,880 minutes (48 hours). To specify the custom expiration period, set the\n\t\t\t\texpiresInMinutes
property. The expiresInMinutes
property\n\t\t\tis always reset when you invoke this operation for a task that already has\n\t\t\t\tprotectionEnabled
set to true
. You can keep extending the\n\t\t\tprotection expiration period of a task by invoking this operation repeatedly.
To learn more about Amazon ECS task protection, see Task scale-in\n\t\t\t\tprotection in the \n Amazon Elastic Container Service Developer Guide\n .
\nThis operation is only supported for tasks belonging to an Amazon ECS service. Invoking\n\t\t\t\tthis operation for a standalone task will result in an TASK_NOT_VALID
\n\t\t\t\tfailure. For more information, see API failure\n\t\t\t\t\treasons.
If you prefer to set task protection from within the container, we recommend using\n\t\t\t\tthe Task scale-in protection endpoint.
\nThe access configuration for the cluster.
" } + }, + "upgradePolicy": { + "target": "com.amazonaws.eks#UpgradePolicyResponse", + "traits": { + "smithy.api#documentation": "This value indicates if extended support is enabled or disabled for the cluster.
\n\n Learn more about EKS Extended Support in the EKS User Guide.\n
" + } } }, "traits": { @@ -3331,6 +3349,12 @@ "traits": { "smithy.api#documentation": "If you set this value to False
when creating a cluster, the default networking add-ons will not be installed.
The default networking addons include vpc-cni, coredns, and kube-proxy.
\nUse this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.
" } + }, + "upgradePolicy": { + "target": "com.amazonaws.eks#UpgradePolicyRequest", + "traits": { + "smithy.api#documentation": "New clusters, by default, have extended support enabled. You can disable extended support when creating a cluster by setting this value to STANDARD
.
The access configuration for the cluster.
" } + }, + "upgradePolicy": { + "target": "com.amazonaws.eks#UpgradePolicyRequest", + "traits": { + "smithy.api#documentation": "You can enable or disable extended support for clusters currently on standard support. You cannot disable extended support once it starts. You must enable extended support before your cluster exits standard support.
" + } } }, "traits": { @@ -10591,6 +10638,12 @@ "traits": { "smithy.api#enumValue": "PodIdentityAssociations" } + }, + "UPGRADE_POLICY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UpgradePolicy" + } } } }, @@ -10793,9 +10846,43 @@ "traits": { "smithy.api#enumValue": "AccessConfigUpdate" } + }, + "UPGRADE_POLICY_UPDATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UpgradePolicyUpdate" + } } } }, + "com.amazonaws.eks#UpgradePolicyRequest": { + "type": "structure", + "members": { + "supportType": { + "target": "com.amazonaws.eks#SupportType", + "traits": { + "smithy.api#documentation": "If the cluster is set to EXTENDED
, it will enter extended support at the end of standard support. If the cluster is set to STANDARD
, it will be automatically upgraded at the end of standard support.
\n Learn more about EKS Extended Support in the EKS User Guide.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "The support policy to use for the cluster. Extended support allows you to remain on specific Kubernetes versions for longer. Clusters in extended support have higher costs. The default value is EXTENDED
. Use STANDARD
to disable extended support.
\n Learn more about EKS Extended Support in the EKS User Guide.\n
" + } + }, + "com.amazonaws.eks#UpgradePolicyResponse": { + "type": "structure", + "members": { + "supportType": { + "target": "com.amazonaws.eks#SupportType", + "traits": { + "smithy.api#documentation": "If the cluster is set to EXTENDED
, it will enter extended support at the end of standard support. If the cluster is set to STANDARD
, it will be automatically upgraded at the end of standard support.
\n Learn more about EKS Extended Support in the EKS User Guide.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "This value indicates if extended support is enabled or disabled for the cluster.
\n\n Learn more about EKS Extended Support in the EKS User Guide.\n
" + } + }, "com.amazonaws.eks#VpcConfigRequest": { "type": "structure", "members": { diff --git a/models/elastic-load-balancing-v2.json b/models/elastic-load-balancing-v2.json index acc08e24c4..c8059f8ca2 100644 --- a/models/elastic-load-balancing-v2.json +++ b/models/elastic-load-balancing-v2.json @@ -1775,6 +1775,23 @@ "com.amazonaws.elasticloadbalancingv2#Default": { "type": "boolean" }, + "com.amazonaws.elasticloadbalancingv2#DeleteAssociationSameAccountException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.elasticloadbalancingv2#ErrorDescription" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "DeleteAssociationSameAccount", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "The specified association cannot be within the same account.
", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.elasticloadbalancingv2#DeleteListener": { "type": "operation", "input": { @@ -1934,6 +1951,70 @@ "smithy.api#output": {} } }, + "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociation": { + "type": "operation", + "input": { + "target": "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociationInput" + }, + "output": { + "target": "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.elasticloadbalancingv2#DeleteAssociationSameAccountException" + }, + { + "target": "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociationNotFoundException" + }, + { + "target": "com.amazonaws.elasticloadbalancingv2#TrustStoreNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "Deletes a shared trust store association.
", + "smithy.api#examples": [ + { + "title": "Delete a shared trust store association", + "documentation": "This example deletes the association between the specified trust store and the specified load balancer.", + "input": { + "TrustStoreArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:truststore/my-trust-store/73e2d6bc24d8a063", + "ResourceArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-load-balancer/80233fa81d678c2c" + } + } + ] + } + }, + "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociationInput": { + "type": "structure", + "members": { + "TrustStoreArn": { + "target": "com.amazonaws.elasticloadbalancingv2#TrustStoreArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the trust store.
", + "smithy.api#required": {} + } + }, + "ResourceArn": { + "target": "com.amazonaws.elasticloadbalancingv2#ResourceArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociationOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.elasticloadbalancingv2#DeleteTargetGroup": { "type": "operation", "input": { @@ -3295,7 +3376,7 @@ "Include": { "target": "com.amazonaws.elasticloadbalancingv2#ListOfDescribeTargetHealthIncludeOptions", "traits": { - "smithy.api#documentation": "Used to inclue anomaly detection information.
" + "smithy.api#documentation": "Used to include anomaly detection information.
" } } }, @@ -3459,7 +3540,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes the revocation files in use by the specified \n trust store arn, or revocation ID.
", + "smithy.api#documentation": "Describes the revocation files in use by the specified trust store or revocation\n files.
", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "NextMarker", @@ -3535,7 +3616,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes all trust stores for a given account \n by trust store arn’s or name.
", + "smithy.api#documentation": "Describes all trust stores for the specified account.
", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "NextMarker", @@ -3720,6 +3801,9 @@ { "target": "com.amazonaws.elasticloadbalancingv2#DeleteRule" }, + { + "target": "com.amazonaws.elasticloadbalancingv2#DeleteSharedTrustStoreAssociation" + }, { "target": "com.amazonaws.elasticloadbalancingv2#DeleteTargetGroup" }, @@ -3771,6 +3855,9 @@ { "target": "com.amazonaws.elasticloadbalancingv2#DescribeTrustStores" }, + { + "target": "com.amazonaws.elasticloadbalancingv2#GetResourcePolicy" + }, { "target": "com.amazonaws.elasticloadbalancingv2#GetTrustStoreCaCertificatesBundle" }, @@ -4952,6 +5039,62 @@ "smithy.api#documentation": "Information about a forward action.
" } }, + "com.amazonaws.elasticloadbalancingv2#GetResourcePolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.elasticloadbalancingv2#GetResourcePolicyInput" + }, + "output": { + "target": "com.amazonaws.elasticloadbalancingv2#GetResourcePolicyOutput" + }, + "errors": [ + { + "target": "com.amazonaws.elasticloadbalancingv2#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieves the resource policy for a specified resource.
", + "smithy.api#examples": [ + { + "title": "Retrieve a resource policy", + "documentation": "This example retrieves the resource policy for the specified trust store.", + "input": { + "ResourceArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:truststore/my-trust-store/73e2d6bc24d8a067" + } + } + ] + } + }, + "com.amazonaws.elasticloadbalancingv2#GetResourcePolicyInput": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.elasticloadbalancingv2#ResourceArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.elasticloadbalancingv2#GetResourcePolicyOutput": { + "type": "structure", + "members": { + "Policy": { + "target": "com.amazonaws.elasticloadbalancingv2#Policy", + "traits": { + "smithy.api#documentation": "The content of the resource policy.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.elasticloadbalancingv2#GetTrustStoreCaCertificatesBundle": { "type": "operation", "input": { @@ -6611,7 +6754,7 @@ } ], "traits": { - "smithy.api#documentation": "Update the ca certificate bundle for a given trust store.
" + "smithy.api#documentation": "Update the ca certificate bundle for the specified trust store.
" } }, "com.amazonaws.elasticloadbalancingv2#ModifyTrustStoreInput": { @@ -6686,6 +6829,12 @@ "traits": { "smithy.api#documentation": "Indicates whether expired client certificates are ignored.
" } + }, + "TrustStoreAssociationStatus": { + "target": "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociationStatusEnum", + "traits": { + "smithy.api#documentation": "Indicates a shared trust stores association status.
" + } } }, "traits": { @@ -6753,6 +6902,14 @@ "smithy.api#documentation": "Information about a path pattern condition.
" } }, + "com.amazonaws.elasticloadbalancingv2#Policy": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, "com.amazonaws.elasticloadbalancingv2#Port": { "type": "integer", "traits": { @@ -7268,6 +7425,23 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.elasticloadbalancingv2#ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.elasticloadbalancingv2#ErrorDescription" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ResourceNotFound", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "The specified resource does not exist.
", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.elasticloadbalancingv2#RevocationContent": { "type": "structure", "members": { @@ -7600,7 +7774,7 @@ "target": "com.amazonaws.elasticloadbalancingv2#IpAddressType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "Note: Internal load balancers must use the ipv4
IP address type.
[Application Load Balancers] The IP address type. The possible values are \n ipv4
(for only IPv4 addresses), dualstack
(for IPv4 and \n IPv6 addresses), and dualstack-without-public-ipv4
(for IPv6 only public \n addresses, with private IPv4 and IPv6 addresses).
[Network Load Balancers] The IP address type. The possible values are \n ipv4
(for only IPv4 addresses) and dualstack
\n (for IPv4 and IPv6 addresses). You can’t specify dualstack
\n for a load balancer with a UDP or TCP_UDP listener.
[Gateway Load Balancers] The IP address type. The possible values are \n ipv4
(for only IPv4 addresses) and dualstack
\n (for IPv4 and IPv6 addresses).
Note: Internal load balancers must use the ipv4
IP address type.
[Application Load Balancers] The IP address type. The possible values are \n ipv4
(for only IPv4 addresses), dualstack
(for IPv4 and \n IPv6 addresses), and dualstack-without-public-ipv4
(for IPv6 only public \n addresses, with private IPv4 and IPv6 addresses).
Note: Application Load Balancer authentication only supports IPv4 addresses when \n connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public \n IPv4 address the load balancer cannot complete the authentication process, resulting \n in HTTP 500 errors.
\n[Network Load Balancers] The IP address type. The possible values are \n ipv4
(for only IPv4 addresses) and dualstack
\n (for IPv4 and IPv6 addresses). You can’t specify dualstack
\n for a load balancer with a UDP or TCP_UDP listener.
[Gateway Load Balancers] The IP address type. The possible values are \n ipv4
(for only IPv4 addresses) and dualstack
\n (for IPv4 and IPv6 addresses).
Information about the resources a trust store is associated with.
" } }, + "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociationNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.elasticloadbalancingv2#ErrorDescription" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "AssociationNotFound", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "The specified association does not exist.
", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociationResourceArn": { "type": "string" }, + "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociationStatusEnum": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "active" + } + }, + "REMOVED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "removed" + } + } + } + }, "com.amazonaws.elasticloadbalancingv2#TrustStoreAssociations": { "type": "list", "member": { diff --git a/models/elastic-load-balancing.json b/models/elastic-load-balancing.json index a246f7688e..6f2556ab02 100644 --- a/models/elastic-load-balancing.json +++ b/models/elastic-load-balancing.json @@ -937,6 +937,111 @@ "output": { "DNSName": "my-load-balancer-1234567890.us-west-2.elb.amazonaws.com" } + }, + { + "title": "To create an HTTP load balancer in EC2-Classic", + "documentation": "This example creates a load balancer with an HTTP listener in EC2-Classic.", + "input": { + "LoadBalancerName": "my-load-balancer", + "Listeners": [ + { + "Protocol": "HTTP", + "LoadBalancerPort": 80, + "InstanceProtocol": "HTTP", + "InstancePort": 80 + } + ], + "AvailabilityZones": [ + "us-west-2a" + ] + }, + "output": { + "DNSName": "my-load-balancer-123456789.us-west-2.elb.amazonaws.com" + } + }, + { + "title": "To create an HTTPS load balancer in a VPC", + "documentation": "This example creates a load balancer with an HTTPS listener in a VPC.", + "input": { + "LoadBalancerName": "my-load-balancer", + "Listeners": [ + { + "Protocol": "HTTP", + "LoadBalancerPort": 80, + "InstanceProtocol": "HTTP", + "InstancePort": 80 + }, + { + "Protocol": "HTTPS", + "LoadBalancerPort": 443, + "InstanceProtocol": "HTTP", + "InstancePort": 80, + "SSLCertificateId": "arn:aws:iam::123456789012:server-certificate/my-server-cert" + } + ], + "Subnets": [ + "subnet-15aaab61" + ], + "SecurityGroups": [ + "sg-a61988c3" + 
] + }, + "output": { + "DNSName": "my-load-balancer-1234567890.us-west-2.elb.amazonaws.com" + } + }, + { + "title": "To create an HTTPS load balancer in EC2-Classic", + "documentation": "This example creates a load balancer with an HTTPS listener in EC2-Classic.", + "input": { + "LoadBalancerName": "my-load-balancer", + "Listeners": [ + { + "Protocol": "HTTP", + "LoadBalancerPort": 80, + "InstanceProtocol": "HTTP", + "InstancePort": 80 + }, + { + "Protocol": "HTTPS", + "LoadBalancerPort": 443, + "InstanceProtocol": "HTTP", + "InstancePort": 80, + "SSLCertificateId": "arn:aws:iam::123456789012:server-certificate/my-server-cert" + } + ], + "AvailabilityZones": [ + "us-west-2a" + ] + }, + "output": { + "DNSName": "my-load-balancer-123456789.us-west-2.elb.amazonaws.com" + } + }, + { + "title": "To create an internal load balancer", + "documentation": "This example creates an internal load balancer with an HTTP listener in a VPC.", + "input": { + "LoadBalancerName": "my-load-balancer", + "Listeners": [ + { + "Protocol": "HTTP", + "LoadBalancerPort": 80, + "InstanceProtocol": "HTTP", + "InstancePort": 80 + } + ], + "Subnets": [ + "subnet-15aaab61" + ], + "SecurityGroups": [ + "sg-a61988c3" + ], + "Scheme": "internal" + }, + "output": { + "DNSName": "internal-my-load-balancer-123456789.us-west-2.elb.amazonaws.com" + } } ] } @@ -1014,6 +1119,22 @@ } ] } + }, + { + "title": "To create an HTTPS listener for a load balancer", + "documentation": "This example creates a listener for your load balancer at port 443 using the HTTPS protocol.", + "input": { + "LoadBalancerName": "my-load-balancer", + "Listeners": [ + { + "Protocol": "HTTPS", + "LoadBalancerPort": 443, + "InstanceProtocol": "HTTP", + "InstancePort": 80, + "SSLCertificateId": "arn:aws:iam::123456789012:server-certificate/my-server-cert" + } + ] + } } ] } @@ -1060,6 +1181,36 @@ } ] } + }, + { + "title": "To create a public key policy", + "documentation": "This example creates a public key policy.", + "input": { + 
"LoadBalancerName": "my-load-balancer", + "PolicyName": "my-PublicKey-policy", + "PolicyTypeName": "PublicKeyPolicyType", + "PolicyAttributes": [ + { + "AttributeName": "PublicKey", + "AttributeValue": "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwAYUjnfyEyXr1pxjhFWBpMlggUcqoi3kl+dS74kj//c6x7ROtusUaeQCTgIUkayttRDWchuqo1pHC1u+n5xxXnBBe2ejbb2WRsKIQ5rXEeixsjFpFsojpSQKkzhVGI6mJVZBJDVKSHmswnwLBdofLhzvllpovBPTHe+o4haAWvDBALJU0pkSI1FecPHcs2hwxf14zHoXy1e2k36A64nXW43wtfx5qcVSIxtCEOjnYRg7RPvybaGfQ+v6Iaxb/+7J5kEvZhTFQId+bSiJImF1FSUT1W1xwzBZPUbcUkkXDj45vC2s3Z8E+Lk7a3uZhvsQHLZnrfuWjBWGWvZ/MhZYgEXAMPLE" + } + ] + } + }, + { + "title": "To create a backend server authentication policy", + "documentation": "This example creates a backend server authentication policy that enables authentication on your backend instance using a public key policy.", + "input": { + "LoadBalancerName": "my-load-balancer", + "PolicyName": "my-authentication-policy", + "PolicyTypeName": "BackendServerAuthenticationPolicyType", + "PolicyAttributes": [ + { + "AttributeName": "PublicKeyPolicyName", + "AttributeValue": "my-PublicKey-policy" + } + ] + } } ] } @@ -2001,7 +2152,35 @@ "inputToken": "Marker", "outputToken": "NextMarker", "items": "LoadBalancerDescriptions" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeLoadBalancersSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + }, + { + "id": "DescribeLoadBalancersFailure", + "params": { + "LoadBalancerNames": [ + "fake_load_balancer" + ] + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.elasticloadbalancing#DescribeTags": { @@ -4046,6 +4225,28 @@ } } } + }, + { + "title": "To enable connection draining", + "documentation": "This example enables connection draining for the specified load balancer.", + "input": { + 
"LoadBalancerName": "my-load-balancer", + "LoadBalancerAttributes": { + "ConnectionDraining": { + "Enabled": true, + "Timeout": 300 + } + } + }, + "output": { + "LoadBalancerName": "my-load-balancer", + "LoadBalancerAttributes": { + "ConnectionDraining": { + "Enabled": true, + "Timeout": 300 + } + } + } } ] } diff --git a/models/elasticache.json b/models/elasticache.json index bcc195c9b4..c6cea8493a 100644 --- a/models/elasticache.json +++ b/models/elasticache.json @@ -203,13 +203,13 @@ "ScaleUpModifications": { "target": "com.amazonaws.elasticache#NodeTypeList", "traits": { - "smithy.api#documentation": "A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group.
\nWhen scaling up a Redis cluster or replication group using\n ModifyCacheCluster
or ModifyReplicationGroup
, use a value\n from this list for the CacheNodeType
parameter.
A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group.
\nWhen scaling up a Redis OSS cluster or replication group using\n ModifyCacheCluster
or ModifyReplicationGroup
, use a value\n from this list for the CacheNodeType
parameter.
A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group. When scaling down a Redis cluster or\n replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from\n this list for the CacheNodeType parameter.
" + "smithy.api#documentation": "A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group. When scaling down a Redis OSS cluster or\n replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from\n this list for the CacheNodeType parameter.
" } } }, @@ -1886,7 +1886,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The name of the compute and memory capacity node type for the cluster.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis configuration variables appendonly
and\n appendfsync
are not supported on Redis version 2.8.22 and\n later.
The name of the compute and memory capacity node type for the cluster.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis OSS append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis OSS Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis OSS configuration variables appendonly
and\n appendfsync
are not supported on Redis OSS version 2.8.22 and\n later.
The number of cache nodes in the cluster.
\nFor clusters running Redis, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.
" + "smithy.api#documentation": "The number of cache nodes in the cluster.
\nFor clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.
" } }, "PreferredAvailabilityZone": { @@ -1973,7 +1973,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#Boolean", "traits": { - "smithy.api#documentation": "If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" + "smithy.api#documentation": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" } }, "SecurityGroups": { @@ -2003,7 +2003,7 @@ "AuthTokenEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "A flag that enables using an AuthToken
(password) when issuing Redis\n commands.
Default: false
\n
A flag that enables using an AuthToken
(password) when issuing Redis OSS \n commands.
Default: false
\n
A flag that enables in-transit encryption when set to true
.
\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6
, 4.x
or\n later.
Default: false
\n
A flag that enables in-transit encryption when set to true
.
\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6
, 4.x
or\n later.
Default: false
\n
A flag that enables encryption at-rest when set to true
.
You cannot modify the value of AtRestEncryptionEnabled
after the cluster\n is created. To enable at-rest encryption on a cluster you must set\n AtRestEncryptionEnabled
to true
when you create a\n cluster.
\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6
, 4.x
or\n later.
Default: false
\n
A flag that enables encryption at-rest when set to true
.
You cannot modify the value of AtRestEncryptionEnabled
after the cluster\n is created. To enable at-rest encryption on a cluster you must set\n AtRestEncryptionEnabled
to true
when you create a\n cluster.
\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6
, 4.x
or\n later.
Default: false
\n
Must be either ipv4
| ipv6
| dual_stack
. IPv6\n is supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
Must be either ipv4
| ipv6
| dual_stack
. IPv6\n is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
The network type associated with the cluster, either ipv4
|\n ipv6
. IPv6 is supported for workloads using Redis engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
The network type associated with the cluster, either ipv4
|\n ipv6
. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
Represents an individual cache node within a cluster. Each cache node runs its own\n instance of the cluster's protocol-compliant caching software - either Memcached or\n Redis.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis configuration variables appendonly
and\n appendfsync
are not supported on Redis version 2.8.22 and\n later.
Represents an individual cache node within a cluster. Each cache node runs its own\n instance of the cluster's protocol-compliant caching software - either Memcached or\n Redis OSS.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis OSS append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis OSS Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis OSS configuration variables appendonly
and\n appendfsync
are not supported on Redis OSS version 2.8.22 and\n later.
A parameter that has a different value for each cache node type it is applied to. For\n example, in a Redis cluster, a cache.m1.large
cache node type would have a\n larger maxmemory
value than a cache.m1.small
type.
A parameter that has a different value for each cache node type it is applied to. For\n example, in a Redis OSS cluster, a cache.m1.large
cache node type would have a\n larger maxmemory
value than a cache.m1.small
type.
Either ipv4
| ipv6
| dual_stack
. IPv6 is\n supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
Either ipv4
| ipv6
| dual_stack
. IPv6 is\n supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
The 4-digit id for the node group you are configuring. For Redis (cluster mode\n disabled) replication groups, the node group id is always 0001. To find a Redis (cluster\n mode enabled)'s node group's (shard's) id, see Finding a Shard's\n Id.
", + "smithy.api#documentation": "The 4-digit id for the node group you are configuring. For Redis OSS (cluster mode\n disabled) replication groups, the node group id is always 0001. To find a Redis OSS (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's\n Id.
", "smithy.api#required": {} } }, @@ -3117,14 +3117,14 @@ "target": "com.amazonaws.elasticache#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The number of replicas you want in this node group at the end of this operation.\n The maximum value for NewReplicaCount
is 5. The minimum value depends upon\n the type of Redis replication group you are working with.
The minimum number of replicas in a shard or replication group is:
\nRedis (cluster mode disabled)
\nIf Multi-AZ: 1
\nIf Multi-AZ: 0
\nRedis (cluster mode enabled): 0 (though you will not be able to failover to\n a replica if your primary node fails)
\nThe number of replicas you want in this node group at the end of this operation.\n The maximum value for NewReplicaCount
is 5. The minimum value depends upon\n the type of Redis OSS replication group you are working with.
The minimum number of replicas in a shard or replication group is:
\nRedis OSS (cluster mode disabled)
\nIf Multi-AZ: 1
\nIf Multi-AZ: 0
\nRedis OSS (cluster mode enabled): 0 (though you will not be able to failover to\n a replica if your primary node fails)
\nA list of PreferredAvailabilityZone
strings that specify which\n availability zones the replication group's nodes are to be in. The nummber of\n PreferredAvailabilityZone
values must equal the value of\n NewReplicaCount
plus 1 to account for the primary node. If this member\n of ReplicaConfiguration
is omitted, ElastiCache for Redis selects the\n availability zone for each of the replicas.
A list of PreferredAvailabilityZone
strings that specify which\n availability zones the replication group's nodes are to be in. The nummber of\n PreferredAvailabilityZone
values must equal the value of\n NewReplicaCount
plus 1 to account for the primary node. If this member\n of ReplicaConfiguration
is omitted, ElastiCache (Redis OSS) selects the\n availability zone for each of the replicas.
Creates a copy of an existing serverless cache’s snapshot. Available for Redis only.
" + "smithy.api#documentation": "Creates a copy of an existing serverless cache’s snapshot. Available for Redis OSS and Serverless Memcached only.
" } }, "com.amazonaws.elasticache#CopyServerlessCacheSnapshotRequest": { @@ -3183,7 +3183,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The identifier of the existing serverless cache’s snapshot to be copied. Available for Redis only.
", + "smithy.api#documentation": "The identifier of the existing serverless cache’s snapshot to be copied. Available for Redis OSS and Serverless Memcached only.
", "smithy.api#required": {} } }, @@ -3191,20 +3191,20 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The identifier for the snapshot to be created. Available for Redis only.
", + "smithy.api#documentation": "The identifier for the snapshot to be created. Available for Redis OSS and Serverless Memcached only.
", "smithy.api#required": {} } }, "KmsKeyId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The identifier of the KMS key used to encrypt the target snapshot. Available for Redis only.
" + "smithy.api#documentation": "The identifier of the KMS key used to encrypt the target snapshot. Available for Redis OSS and Serverless Memcached only.
" } }, "Tags": { "target": "com.amazonaws.elasticache#TagList", "traits": { - "smithy.api#documentation": "A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis only. Default: NULL
" + "smithy.api#documentation": "A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only. Default: NULL
" } } }, @@ -3218,7 +3218,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "The response for the attempt to copy the serverless cache snapshot. Available for Redis only.
" + "smithy.api#documentation": "The response for the attempt to copy the serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.
" } } }, @@ -3258,7 +3258,7 @@ } ], "traits": { - "smithy.api#documentation": "Makes a copy of an existing snapshot.
\nThis operation is valid for Redis only.
\nUsers or groups that have permissions to use the CopySnapshot
\n operation can create their own Amazon S3 buckets and copy snapshots to it. To\n control access to your snapshots, use an IAM policy to control who has the ability\n to use the CopySnapshot
operation. For more information about using IAM\n to control the use of ElastiCache operations, see Exporting\n Snapshots and Authentication & Access\n Control.
You could receive the following error messages.
\n\n Error Messages\n
\n\n Error Message: The S3 bucket %s is outside of\n the region.
\n\n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.
\n\n Error Message: The S3 bucket %s does not\n exist.
\n\n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.
\n\n Error Message: The S3 bucket %s is not owned\n by the authenticated user.
\n\n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.
\n\n Error Message: The authenticated user does\n not have sufficient permissions to perform the desired activity.
\n\n Solution: Contact your system administrator\n to get the needed permissions.
\n\n Error Message: The S3 bucket %s already\n contains an object with key %s.
\n\n Solution: Give the\n TargetSnapshotName
a new and unique value. If exporting a\n snapshot, you could alternatively create a new Amazon S3 bucket and use this\n same value for TargetSnapshotName
.
\n Error Message: ElastiCache has not been\n granted READ permissions %s on the S3 Bucket.
\n\n Solution: Add List and Read permissions on\n the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.
\n\n Error Message: ElastiCache has not been\n granted WRITE permissions %s on the S3 Bucket.
\n\n Solution: Add Upload/Delete permissions on\n the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.
\n\n Error Message: ElastiCache has not been\n granted READ_ACP permissions %s on the S3 Bucket.
\n\n Solution: Add View Permissions on the bucket.\n For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.
\nMakes a copy of an existing snapshot.
\nThis operation is valid for Redis OSS only.
\nUsers or groups that have permissions to use the CopySnapshot
\n operation can create their own Amazon S3 buckets and copy snapshots to it. To\n control access to your snapshots, use an IAM policy to control who has the ability\n to use the CopySnapshot
operation. For more information about using IAM\n to control the use of ElastiCache operations, see Exporting\n Snapshots and Authentication & Access\n Control.
You could receive the following error messages.
\n\n Error Messages\n
\n\n Error Message: The S3 bucket %s is outside of\n the region.
\n\n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.
\n\n Error Message: The S3 bucket %s does not\n exist.
\n\n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.
\n\n Error Message: The S3 bucket %s is not owned\n by the authenticated user.
\n\n Solution: Create an Amazon S3 bucket in the\n same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User\n Guide.
\n\n Error Message: The authenticated user does\n not have sufficient permissions to perform the desired activity.
\n\n Solution: Contact your system administrator\n to get the needed permissions.
\n\n Error Message: The S3 bucket %s already\n contains an object with key %s.
\n\n Solution: Give the\n TargetSnapshotName
a new and unique value. If exporting a\n snapshot, you could alternatively create a new Amazon S3 bucket and use this\n same value for TargetSnapshotName
.
\n Error Message: ElastiCache has not been\n granted READ permissions %s on the S3 Bucket.
\n\n Solution: Add List and Read permissions on\n the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.
\n\n Error Message: ElastiCache has not been\n granted WRITE permissions %s on the S3 Bucket.
\n\n Solution: Add Upload/Delete permissions on\n the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.
\n\n Error Message: ElastiCache has not been\n granted READ_ACP permissions %s on the S3 Bucket.
\n\n Solution: Add View Permissions on the bucket.\n For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the\n ElastiCache User Guide.
\nCreates a cluster. All nodes in the cluster run the same protocol-compliant cache\n engine software, either Memcached or Redis.
\nThis operation is not supported for Redis (cluster mode enabled) clusters.
", + "smithy.api#documentation": "Creates a cluster. All nodes in the cluster run the same protocol-compliant cache\n engine software, either Memcached or Redis OSS.
\nThis operation is not supported for Redis OSS (cluster mode enabled) clusters.
", "smithy.api#examples": [ { "title": "CreateCacheCluster", @@ -3528,13 +3528,13 @@ "NumCacheNodes": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "The initial number of cache nodes that the cluster has.
\nFor clusters running Redis, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.
\nIf you need more than 40 nodes for your Memcached cluster, please fill out the\n ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/.
" + "smithy.api#documentation": "The initial number of cache nodes that the cluster has.
\nFor clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.
\nIf you need more than 40 nodes for your Memcached cluster, please fill out the\n ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/.
" } }, "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The compute and memory capacity of the nodes in the node group (shard).
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis configuration variables appendonly
and\n appendfsync
are not supported on Redis version 2.8.22 and\n later.
The compute and memory capacity of the nodes in the node group (shard).
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis OSS append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis OSS Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis OSS configuration variables appendonly
and\n appendfsync
are not supported on Redis OSS version 2.8.22 and\n later.
A single-element string list containing an Amazon Resource Name (ARN) that uniquely\n identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to\n populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any\n commas.
\nThis parameter is only valid if the Engine
parameter is\n redis
.
Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
\n
A single-element string list containing an Amazon Resource Name (ARN) that uniquely\n identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to\n populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any\n commas.
\nThis parameter is only valid if the Engine
parameter is\n redis
.
Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
\n
The name of a Redis snapshot from which to restore data into the new node group\n (shard). The snapshot status changes to restoring
while the new node group\n (shard) is being created.
This parameter is only valid if the Engine
parameter is\n redis
.
The name of a Redis OSS snapshot from which to restore data into the new node group\n (shard). The snapshot status changes to restoring
while the new node group\n (shard) is being created.
This parameter is only valid if the Engine
parameter is\n redis
.
If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" + "smithy.api#documentation": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" } }, "SnapshotRetentionLimit": { @@ -3666,13 +3666,13 @@ "NetworkType": { "target": "com.amazonaws.elasticache#NetworkType", "traits": { - "smithy.api#documentation": "Must be either ipv4
| ipv6
| dual_stack
. IPv6\n is supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
Must be either ipv4
| ipv6
| dual_stack
. IPv6\n is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
The network type you choose when modifying a cluster, either ipv4
|\n ipv6
. IPv6 is supported for workloads using Redis engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
The network type you choose when modifying a cluster, either ipv4
|\n ipv6
. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
Global Datastore for Redis offers fully managed, fast, reliable and secure\n cross-region replication. Using Global Datastore for Redis, you can create cross-region\n read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster\n recovery across regions. For more information, see Replication\n Across Regions Using Global Datastore.
\nThe GlobalReplicationGroupIdSuffix is the\n name of the Global datastore.
\nThe PrimaryReplicationGroupId represents the\n name of the primary cluster that accepts writes and will replicate updates to\n the secondary cluster.
\nGlobal Datastore for Redis OSS offers fully managed, fast, reliable and secure\n cross-region replication. Using Global Datastore for Redis OSS, you can create cross-region\n read replica clusters for ElastiCache (Redis OSS) to enable low-latency reads and disaster\n recovery across regions. For more information, see Replication\n Across Regions Using Global Datastore.
\nThe GlobalReplicationGroupIdSuffix is the\n name of the Global datastore.
\nThe PrimaryReplicationGroupId represents the\n name of the primary cluster that accepts writes and will replicate updates to\n the secondary cluster.
\nCreates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication\n group.
\nThis API can be used to create a standalone regional replication group or a secondary\n replication group associated with a Global datastore.
\nA Redis (cluster mode disabled) replication group is a collection of nodes, where\n one of the nodes is a read/write primary and the others are read-only replicas.\n Writes to the primary are asynchronously propagated to the replicas.
\nA Redis cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI:\n node groups). Each shard has a primary node and up to 5 read-only replica nodes. The\n configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which\n is the maximum number or replicas allowed.
\nThe node or shard limit can be increased to a maximum of 500 per cluster if the Redis\n engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node\n cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500\n shards (single primary and no replicas). Make sure there are enough available IP\n addresses to accommodate the increase. Common pitfalls include the subnets in the subnet\n group have too small a CIDR range or the subnets are shared and heavily used by other\n clusters. For more information, see Creating a Subnet\n Group. For versions below 5.0.6, the limit is 250 per cluster.
\nTo request a limit increase, see Amazon Service Limits and\n choose the limit type Nodes per cluster per instance\n type.
\nWhen a Redis (cluster mode disabled) replication group has been successfully created,\n you can add one or more read replicas to it, up to a total of 5 read replicas. If you\n need to increase or decrease the number of node groups (console: shards), you can avail\n yourself of ElastiCache for Redis' scaling. For more information, see Scaling\n ElastiCache for Redis Clusters in the ElastiCache User\n Guide.
\nThis operation is valid for Redis only.
\nCreates a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication\n group.
\nThis API can be used to create a standalone regional replication group or a secondary\n replication group associated with a Global datastore.
\nA Redis OSS (cluster mode disabled) replication group is a collection of nodes, where\n one of the nodes is a read/write primary and the others are read-only replicas.\n Writes to the primary are asynchronously propagated to the replicas.
\nA Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI:\n node groups). Each shard has a primary node and up to 5 read-only replica nodes. The\n configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which\n is the maximum number or replicas allowed.
\nThe node or shard limit can be increased to a maximum of 500 per cluster if the Redis OSS \n engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node\n cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500\n shards (single primary and no replicas). Make sure there are enough available IP\n addresses to accommodate the increase. Common pitfalls include the subnets in the subnet\n group have too small a CIDR range or the subnets are shared and heavily used by other\n clusters. For more information, see Creating a Subnet\n Group. For versions below 5.0.6, the limit is 250 per cluster.
\nTo request a limit increase, see Amazon Service Limits and\n choose the limit type Nodes per cluster per instance\n type.
\nWhen a Redis OSS (cluster mode disabled) replication group has been successfully created,\n you can add one or more read replicas to it, up to a total of 5 read replicas. If you\n need to increase or decrease the number of node groups (console: shards), you can use ElastiCache (Redis OSS) scaling. \n For more information, see Scaling\n ElastiCache (Redis OSS) Clusters in the ElastiCache User\n Guide.
\nThis operation is valid for Redis OSS only.
\nSpecifies whether a read-only replica is automatically promoted to read/write primary\n if the existing primary fails.
\n\n AutomaticFailoverEnabled
must be enabled for Redis (cluster mode enabled)\n replication groups.
Default: false
" + "smithy.api#documentation": "Specifies whether a read-only replica is automatically promoted to read/write primary\n if the existing primary fails.
\n\n AutomaticFailoverEnabled
must be enabled for Redis OSS (cluster mode enabled)\n replication groups.
Default: false
" } }, "MultiAZEnabled": { @@ -4279,7 +4279,7 @@ "NumNodeGroups": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "An optional parameter that specifies the number of node groups (shards) for this Redis\n (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit\n this parameter or set it to 1.
\nDefault: 1
" + "smithy.api#documentation": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit\n this parameter or set it to 1.
\nDefault: 1
" } }, "ReplicasPerNodeGroup": { @@ -4291,13 +4291,13 @@ "NodeGroupConfiguration": { "target": "com.amazonaws.elasticache#NodeGroupConfigurationList", "traits": { - "smithy.api#documentation": "A list of node group (shard) configuration options. Each node group (shard)\n configuration has the following members: PrimaryAvailabilityZone
,\n ReplicaAvailabilityZones
, ReplicaCount
, and\n Slots
.
If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled)\n replication group, you can use this parameter to individually configure each node group\n (shard), or you can omit this parameter. However, it is required when seeding a Redis\n (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group\n (shard) using this parameter because you must specify the slots for each node\n group.
" + "smithy.api#documentation": "A list of node group (shard) configuration options. Each node group (shard)\n configuration has the following members: PrimaryAvailabilityZone
,\n ReplicaAvailabilityZones
, ReplicaCount
, and\n Slots
.
If you're creating a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled)\n replication group, you can use this parameter to individually configure each node group\n (shard), or you can omit this parameter. However, it is required when seeding a Redis OSS (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group\n (shard) using this parameter because you must specify the slots for each node\n group.
" } }, "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The compute and memory capacity of the nodes in the node group (shard).
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis configuration variables appendonly
and\n appendfsync
are not supported on Redis version 2.8.22 and\n later.
The compute and memory capacity of the nodes in the node group (shard).
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis OSS append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis OSS Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis OSS configuration variables appendonly
and\n appendfsync
are not supported on Redis OSS version 2.8.22 and\n later.
The name of the parameter group to associate with this replication group. If this\n argument is omitted, the default cache parameter group for the specified engine is\n used.
\nIf you are running Redis version 3.2.4 or later, only one node group (shard), and want\n to use a default parameter group, we recommend that you specify the parameter group by\n name.
\nTo create a Redis (cluster mode disabled) replication group, use\n CacheParameterGroupName=default.redis3.2
.
To create a Redis (cluster mode enabled) replication group, use\n CacheParameterGroupName=default.redis3.2.cluster.on
.
The name of the parameter group to associate with this replication group. If this\n argument is omitted, the default cache parameter group for the specified engine is\n used.
\nIf you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want\n to use a default parameter group, we recommend that you specify the parameter group by\n name.
\nTo create a Redis OSS (cluster mode disabled) replication group, use\n CacheParameterGroupName=default.redis3.2
.
To create a Redis OSS (cluster mode enabled) replication group, use\n CacheParameterGroupName=default.redis3.2.cluster.on
.
A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot\n files stored in Amazon S3. The snapshot files are used to populate the new replication\n group. The Amazon S3 object name in the ARN cannot contain any commas. The new\n replication group will have the number of node groups (console: shards) specified by the\n parameter NumNodeGroups or the number of node groups configured by\n NodeGroupConfiguration regardless of the number of ARNs\n specified here.
\nExample of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
\n
A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot\n files stored in Amazon S3. The snapshot files are used to populate the new replication\n group. The Amazon S3 object name in the ARN cannot contain any commas. The new\n replication group will have the number of node groups (console: shards) specified by the\n parameter NumNodeGroups or the number of node groups configured by\n NodeGroupConfiguration regardless of the number of ARNs\n specified here.
\nExample of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
\n
If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" + "smithy.api#documentation": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" } }, "SnapshotRetentionLimit": { @@ -4399,13 +4399,13 @@ "TransitEncryptionEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "A flag that enables in-transit encryption when set to true
.
This parameter is valid only if the Engine
parameter is\n redis
, the EngineVersion
parameter is 3.2.6
,\n 4.x
or later, and the cluster is being created in an Amazon VPC.
If you enable in-transit encryption, you must also specify a value for\n CacheSubnetGroup
.
\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6
, 4.x
or\n later.
Default: false
\n
For HIPAA compliance, you must specify TransitEncryptionEnabled
as\n true
, an AuthToken
, and a\n CacheSubnetGroup
.
A flag that enables in-transit encryption when set to true
.
This parameter is valid only if the Engine
parameter is\n redis
, the EngineVersion
parameter is 3.2.6
,\n 4.x
or later, and the cluster is being created in an Amazon VPC.
If you enable in-transit encryption, you must also specify a value for\n CacheSubnetGroup
.
\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6
, 4.x
or\n later.
Default: false
\n
For HIPAA compliance, you must specify TransitEncryptionEnabled
as\n true
, an AuthToken
, and a\n CacheSubnetGroup
.
A flag that enables encryption at rest when set to true
.
You cannot modify the value of AtRestEncryptionEnabled
after the\n replication group is created. To enable encryption at rest on a replication group you\n must set AtRestEncryptionEnabled
to true
when you create the\n replication group.
\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6
, 4.x
or\n later.
Default: false
\n
A flag that enables encryption at rest when set to true
.
You cannot modify the value of AtRestEncryptionEnabled
after the\n replication group is created. To enable encryption at rest on a replication group you\n must set AtRestEncryptionEnabled
to true
when you create the\n replication group.
\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6
, 4.x
or\n later.
Default: false
\n
Must be either ipv4
| ipv6
| dual_stack
. IPv6\n is supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
Must be either ipv4
| ipv6
| dual_stack
. IPv6\n is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
The network type you choose when creating a replication group, either\n ipv4
| ipv6
. IPv6 is supported for workloads using Redis\n engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on\n the Nitro system.
The network type you choose when creating a replication group, either\n ipv4
| ipv6
. IPv6 is supported for workloads using Redis OSS\n engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on\n the Nitro system.
A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.
\nWhen setting TransitEncryptionEnabled
to true
, you can set\n your TransitEncryptionMode
to preferred
in the same request,\n to allow both encrypted and unencrypted connections at the same time. Once you migrate\n all your Redis clients to use encrypted connections you can modify the value to\n required
to allow encrypted connections only.
Setting TransitEncryptionMode
to required
is a two-step\n process that requires you to first set the TransitEncryptionMode
to\n preferred
, after that you can set TransitEncryptionMode
to\n required
.
This process will not trigger the replacement of the replication group.
" + "smithy.api#documentation": "A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.
\nWhen setting TransitEncryptionEnabled
to true
, you can set\n your TransitEncryptionMode
to preferred
in the same request,\n to allow both encrypted and unencrypted connections at the same time. Once you migrate\n all your Redis OSS clients to use encrypted connections you can modify the value to\n required
to allow encrypted connections only.
Setting TransitEncryptionMode
to required
is a two-step\n process that requires you to first set the TransitEncryptionMode
to\n preferred
, after that you can set TransitEncryptionMode
to\n required
.
This process will not trigger the replacement of the replication group.
" } }, "ClusterMode": { "target": "com.amazonaws.elasticache#ClusterMode", "traits": { - "smithy.api#documentation": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.
" + "smithy.api#documentation": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS \n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.
" } }, "ServerlessCacheSnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The name of the snapshot used to create a replication group. Available for Redis only.
" + "smithy.api#documentation": "The name of the snapshot used to create a replication group. Available for Redis OSS only.
" } } }, @@ -4578,7 +4578,7 @@ "SnapshotArnsToRestore": { "target": "com.amazonaws.elasticache#SnapshotArnsList", "traits": { - "smithy.api#documentation": "The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis only.
" + "smithy.api#documentation": "The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis OSS and Serverless Memcached only.
" } }, "Tags": { @@ -4590,7 +4590,7 @@ "UserGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. Default is NULL.
" + "smithy.api#documentation": "The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL.
" } }, "SubnetIds": { @@ -4602,13 +4602,13 @@ "SnapshotRetentionLimit": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "The number of snapshots that will be retained for the serverless cache that is being created. \n As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis only.
" + "smithy.api#documentation": "The number of snapshots that will be retained for the serverless cache that is being created. \n As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis OSS and Serverless Memcached only.
" } }, "DailySnapshotTime": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The daily time that snapshots will be created from the new serverless cache. By default this number is populated with \n 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis only.
" + "smithy.api#documentation": "The daily time that snapshots will be created from the new serverless cache. By default this number is populated with \n 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis OSS and Serverless Memcached only.
" } } }, @@ -4665,7 +4665,7 @@ } ], "traits": { - "smithy.api#documentation": "This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis only.
" + "smithy.api#documentation": "This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis OSS and Serverless Memcached only.
" } }, "com.amazonaws.elasticache#CreateServerlessCacheSnapshotRequest": { @@ -4675,7 +4675,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The name for the snapshot being created. Must be unique for the customer account. Available for Redis only.\n Must be between 1 and 255 characters.
", + "smithy.api#documentation": "The name for the snapshot being created. Must be unique for the customer account. Available for Redis OSS and Serverless Memcached only.\n Must be between 1 and 255 characters.
", "smithy.api#required": {} } }, @@ -4683,20 +4683,20 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis only.
", + "smithy.api#documentation": "The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis OSS and Serverless Memcached only.
", "smithy.api#required": {} } }, "KmsKeyId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The ID of the KMS key used to encrypt the snapshot. Available for Redis only. Default: NULL
" + "smithy.api#documentation": "The ID of the KMS key used to encrypt the snapshot. Available for Redis OSS and Serverless Memcached only. Default: NULL
" } }, "Tags": { "target": "com.amazonaws.elasticache#TagList", "traits": { - "smithy.api#documentation": "A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis only.
" + "smithy.api#documentation": "A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only.
" } } }, @@ -4710,7 +4710,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "The state of a serverless cache snapshot at a specific point in time, to the millisecond. Available for Redis only.
" + "smithy.api#documentation": "The state of a serverless cache snapshot at a specific point in time, to the millisecond. Available for Redis OSS and Serverless Memcached only.
" } } }, @@ -4759,7 +4759,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a copy of an entire cluster or replication group at a specific moment in\n time.
\nThis operation is valid for Redis only.
\nCreates a copy of an entire cluster or replication group at a specific moment in\n time.
\nThis operation is valid for Redis OSS only.
\nFor Redis engine version 6.0 onwards: Creates a Redis user. For more information, see\n Using Role Based Access Control (RBAC).
" + "smithy.api#documentation": "For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user. For more information, see\n Using Role Based Access Control (RBAC).
" } }, "com.amazonaws.elasticache#CreateUserGroup": { @@ -5000,7 +5000,7 @@ } ], "traits": { - "smithy.api#documentation": "For Redis engine version 6.0 onwards: Creates a Redis user group. For more\n information, see Using Role Based Access Control (RBAC)\n
" + "smithy.api#documentation": "For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user group. For more\n information, see Using Role Based Access Control (RBAC)\n
" } }, "com.amazonaws.elasticache#CreateUserGroupMessage": { @@ -5018,7 +5018,7 @@ "target": "com.amazonaws.elasticache#EngineType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The current supported value is Redis.
", + "smithy.api#documentation": "The current supported value is Redis user.
", "smithy.api#required": {} } }, @@ -5031,7 +5031,7 @@ "Tags": { "target": "com.amazonaws.elasticache#TagList", "traits": { - "smithy.api#documentation": "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must\n be accompanied by a tag value, although null is accepted. Available for Redis only.
" + "smithy.api#documentation": "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must\n be accompanied by a tag value, although null is accepted. Available for Redis OSS only.
" } } }, @@ -5233,13 +5233,13 @@ "GlobalNodeGroupsToRemove": { "target": "com.amazonaws.elasticache#GlobalNodeGroupIdList", "traits": { - "smithy.api#documentation": "If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster.\n ElastiCache for Redis will attempt to remove all node groups listed by\n GlobalNodeGroupsToRemove from the cluster.
" + "smithy.api#documentation": "If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster.\n ElastiCache (Redis OSS) will attempt to remove all node groups listed by\n GlobalNodeGroupsToRemove from the cluster.
" } }, "GlobalNodeGroupsToRetain": { "target": "com.amazonaws.elasticache#GlobalNodeGroupIdList", "traits": { - "smithy.api#documentation": "If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster.\n ElastiCache for Redis will attempt to retain all node groups listed by\n GlobalNodeGroupsToRetain from the cluster.
" + "smithy.api#documentation": "If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster.\n ElastiCache (Redis OSS) will attempt to retain all node groups listed by\n GlobalNodeGroupsToRetain from the cluster.
" } }, "ApplyImmediately": { @@ -5313,7 +5313,7 @@ } ], "traits": { - "smithy.api#documentation": "Dynamically decreases the number of replicas in a Redis (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Redis (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.
" + "smithy.api#documentation": "Dynamically decreases the number of replicas in a Redis OSS (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Redis OSS (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.
" } }, "com.amazonaws.elasticache#DecreaseReplicaCountMessage": { @@ -5330,13 +5330,13 @@ "NewReplicaCount": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "The number of read replica nodes you want at the completion of this operation. For\n Redis (cluster mode disabled) replication groups, this is the number of replica nodes in\n the replication group. For Redis (cluster mode enabled) replication groups, this is the\n number of replica nodes in each of the replication group's node groups.
\nThe minimum number of replicas in a shard or replication group is:
\nRedis (cluster mode disabled)
\nIf Multi-AZ is enabled: 1
\nIf Multi-AZ is not enabled: 0
\nRedis (cluster mode enabled): 0 (though you will not be able to failover to\n a replica if your primary node fails)
\nThe number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in\n the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the\n number of replica nodes in each of the replication group's node groups.
\nThe minimum number of replicas in a shard or replication group is:
\nRedis OSS (cluster mode disabled)
\nIf Multi-AZ is enabled: 1
\nIf Multi-AZ is not enabled: 0
\nRedis OSS (cluster mode enabled): 0 (though you will not be able to failover to\n a replica if your primary node fails)
\nA list of ConfigureShard
objects that can be used to configure each\n shard in a Redis (cluster mode enabled) replication group. The\n ConfigureShard
has three members: NewReplicaCount
,\n NodeGroupId
, and PreferredAvailabilityZones
.
A list of ConfigureShard
objects that can be used to configure each\n shard in a Redis OSS (cluster mode enabled) replication group. The\n ConfigureShard
has three members: NewReplicaCount
,\n NodeGroupId
, and PreferredAvailabilityZones
.
Deletes a previously provisioned cluster. DeleteCacheCluster
deletes all\n associated cache nodes, node endpoints and the cluster itself. When you receive a\n successful response from this operation, Amazon ElastiCache immediately begins deleting\n the cluster; you cannot cancel or revert this operation.
This operation is not valid for:
\nRedis (cluster mode enabled) clusters
\nRedis (cluster mode disabled) clusters
\nA cluster that is the last read replica of a replication group
\nA cluster that is the primary node of a replication group
\nA node group (shard) that has Multi-AZ mode enabled
\nA cluster from a Redis (cluster mode enabled) replication group
\nA cluster that is not in the available
state
Deletes a previously provisioned cluster. DeleteCacheCluster
deletes all\n associated cache nodes, node endpoints and the cluster itself. When you receive a\n successful response from this operation, Amazon ElastiCache immediately begins deleting\n the cluster; you cannot cancel or revert this operation.
This operation is not valid for:
\nRedis OSS (cluster mode enabled) clusters
\nRedis OSS (cluster mode disabled) clusters
\nA cluster that is the last read replica of a replication group
\nA cluster that is the primary node of a replication group
\nA node group (shard) that has Multi-AZ mode enabled
\nA cluster from a Redis OSS (cluster mode enabled) replication group
\nA cluster that is not in the available
state
Deletes an existing replication group. By default, this operation deletes the entire\n replication group, including the primary/primaries and all of the read replicas. If the\n replication group has only one primary, you can optionally delete only the read\n replicas, while retaining the primary by setting\n RetainPrimaryCluster=true
.
When you receive a successful response from this operation, Amazon ElastiCache\n immediately begins deleting the selected resources; you cannot cancel or revert this\n operation.
\nThis operation is valid for Redis only.
\nDeletes an existing replication group. By default, this operation deletes the entire\n replication group, including the primary/primaries and all of the read replicas. If the\n replication group has only one primary, you can optionally delete only the read\n replicas, while retaining the primary by setting\n RetainPrimaryCluster=true
.
When you receive a successful response from this operation, Amazon ElastiCache\n immediately begins deleting the selected resources; you cannot cancel or revert this\n operation.
\n\n CreateSnapshot
permission is required to create a final snapshot. \n Without this permission, the API call will fail with an Access Denied
exception.
This operation is valid for Redis OSS only.
\nDeletes a specified existing serverless cache.
" + "smithy.api#documentation": "Deletes a specified existing serverless cache.
\n\n CreateServerlessCacheSnapshot
permission is required to create a final snapshot. \n Without this permission, the API call will fail with an Access Denied
exception.
Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis only.\n Default: NULL, i.e. a final snapshot is not taken.
" + "smithy.api#documentation": "Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis OSS and Serverless Memcached only.\n Default: NULL, i.e. a final snapshot is not taken.
" } } }, @@ -5904,7 +5904,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an existing serverless cache snapshot. Available for Redis only.
" + "smithy.api#documentation": "Deletes an existing serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.
" } }, "com.amazonaws.elasticache#DeleteServerlessCacheSnapshotRequest": { @@ -5914,7 +5914,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "Idenfitier of the snapshot to be deleted. Available for Redis only.
", + "smithy.api#documentation": "Idenfitier of the snapshot to be deleted. Available for Redis OSS and Serverless Memcached only.
", "smithy.api#required": {} } } @@ -5929,7 +5929,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "The snapshot to be deleted. Available for Redis only.
" + "smithy.api#documentation": "The snapshot to be deleted. Available for Redis OSS and Serverless Memcached only.
" } } }, @@ -5960,7 +5960,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an existing snapshot. When you receive a successful response from this\n operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or\n revert this operation.
\nThis operation is valid for Redis only.
\nDeletes an existing snapshot. When you receive a successful response from this\n operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or\n revert this operation.
\nThis operation is valid for Redis OSS only.
\nFor Redis engine version 6.0 onwards: Deletes a user. The user will be removed from\n all user groups and in turn removed from all replication groups. For more information,\n see Using Role Based Access Control (RBAC).
" + "smithy.api#documentation": "For Redis OSS engine version 6.0 onwards: Deletes a user. The user will be removed from\n all user groups and in turn removed from all replication groups. For more information,\n see Using Role Based Access Control (RBAC).
" } }, "com.amazonaws.elasticache#DeleteUserGroup": { @@ -6082,7 +6082,7 @@ } ], "traits": { - "smithy.api#documentation": "For Redis engine version 6.0 onwards: Deletes a user group. The user group must first\n be disassociated from the replication group before it can be deleted. For more\n information, see Using Role Based Access Control (RBAC).
" + "smithy.api#documentation": "For Redis OSS engine version 6.0 onwards: Deletes a user group. The user group must first\n be disassociated from the replication group before it can be deleted. For more\n information, see Using Role Based Access Control (RBAC).
" } }, "com.amazonaws.elasticache#DeleteUserGroupMessage": { @@ -6418,7 +6418,7 @@ "ShowCacheClustersNotInReplicationGroups": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "An optional flag that can be included in the DescribeCacheCluster
request\n to show only nodes (API/CLI: clusters) that are not members of a replication group. In\n practice, this mean Memcached and single node Redis clusters.
An optional flag that can be included in the DescribeCacheCluster
request\n to show only nodes (API/CLI: clusters) that are not members of a replication group. In\n practice, this mean Memcached and single node Redis OSS clusters.
Returns information about a particular replication group. If no identifier is\n specified, DescribeReplicationGroups
returns information about all\n replication groups.
This operation is valid for Redis only.
\nReturns information about a particular replication group. If no identifier is\n specified, DescribeReplicationGroups
returns information about all\n replication groups.
This operation is valid for Redis OSS only.
\nThe cache node type filter value. Use this parameter to show only those reservations\n matching the specified cache node type.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis configuration variables appendonly
and\n appendfsync
are not supported on Redis version 2.8.22 and\n later.
The cache node type filter value. Use this parameter to show only those reservations\n matching the specified cache node type.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis OSS append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis OSS Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis OSS configuration variables appendonly
and\n appendfsync
are not supported on Redis OSS version 2.8.22 and\n later.
The cache node type filter value. Use this parameter to show only the available\n offerings matching the specified cache node type.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis configuration variables appendonly
and\n appendfsync
are not supported on Redis version 2.8.22 and\n later.
The cache node type filter value. Use this parameter to show only the available\n offerings matching the specified cache node type.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis OSS append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis OSS Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis OSS configuration variables appendonly
and\n appendfsync
are not supported on Redis OSS version 2.8.22 and\n later.
Returns information about serverless cache snapshots. \n By default, this API lists all of the customer’s serverless cache snapshots. \n It can also describe a single serverless cache snapshot, or the snapshots associated with \n a particular serverless cache. Available for Redis only.
", + "smithy.api#documentation": "Returns information about serverless cache snapshots. \n By default, this API lists all of the customer’s serverless cache snapshots. \n It can also describe a single serverless cache snapshot, or the snapshots associated with \n a particular serverless cache. Available for Redis OSS and Serverless Memcached only.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -8979,31 +8979,31 @@ "ServerlessCacheName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The identifier of serverless cache. If this parameter is specified, \n only snapshots associated with that specific serverless cache are described. Available for Redis only.
" + "smithy.api#documentation": "The identifier of serverless cache. If this parameter is specified, \n only snapshots associated with that specific serverless cache are described. Available for Redis OSS and Serverless Memcached only.
" } }, "ServerlessCacheSnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The identifier of the serverless cache’s snapshot.\n If this parameter is specified, only this snapshot is described. Available for Redis only.
" + "smithy.api#documentation": "The identifier of the serverless cache’s snapshot.\n If this parameter is specified, only this snapshot is described. Available for Redis OSS and Serverless Memcached only.
" } }, "SnapshotType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The type of snapshot that is being described. Available for Redis only.
" + "smithy.api#documentation": "The type of snapshot that is being described. Available for Redis OSS and Serverless Memcached only.
" } }, "NextToken": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Redis only.
" + "smithy.api#documentation": "An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only.
" } }, "MaxResults": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than \n the specified max-results value, a market is included in the response so that remaining results \n can be retrieved. Available for Redis only.The default is 50. The Validation Constraints are a maximum of 50.
" + "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than \n the specified max-results value, a market is included in the response so that remaining results \n can be retrieved. Available for Redis OSS and Serverless Memcached only.The default is 50. The Validation Constraints are a maximum of 50.
" } } }, @@ -9017,13 +9017,13 @@ "NextToken": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Redis only.
" + "smithy.api#documentation": "An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only.
" } }, "ServerlessCacheSnapshots": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshotList", "traits": { - "smithy.api#documentation": "The serverless caches snapshots associated with a given description request. Available for Redis only.
" + "smithy.api#documentation": "The serverless caches snapshots associated with a given description request. Available for Redis OSS and Serverless Memcached only.
" } } }, @@ -9190,7 +9190,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns information about cluster or replication group snapshots. By default,\n DescribeSnapshots
lists all of your snapshots; it can optionally\n describe a single snapshot, or just the snapshots associated with a particular cache\n cluster.
This operation is valid for Redis only.
\nReturns information about cluster or replication group snapshots. By default,\n DescribeSnapshots
lists all of your snapshots; it can optionally\n describe a single snapshot, or just the snapshots associated with a particular cache\n cluster.
This operation is valid for Redis OSS only.
\nThe Elasticache engine to which the update applies. Either Redis or Memcached
" + "smithy.api#documentation": "The Elasticache engine to which the update applies. Either Redis OSS or Memcached.
" } }, "ServiceUpdateStatus": { @@ -9517,7 +9517,7 @@ "Engine": { "target": "com.amazonaws.elasticache#EngineType", "traits": { - "smithy.api#documentation": "The Redis engine.
" + "smithy.api#documentation": "The Redis OSS engine.
" } }, "UserId": { @@ -9896,7 +9896,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis only.
" + "smithy.api#documentation": "Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis OSS only.
" } }, "com.amazonaws.elasticache#ExportServerlessCacheSnapshotRequest": { @@ -9906,7 +9906,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The identifier of the serverless cache snapshot to be exported to S3. Available for Redis only.
", + "smithy.api#documentation": "The identifier of the serverless cache snapshot to be exported to S3. Available for Redis OSS only.
", "smithy.api#required": {} } }, @@ -9914,7 +9914,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region \n as the snapshot. Available for Redis only.
", + "smithy.api#documentation": "Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region \n as the snapshot. Available for Redis OSS only.
", "smithy.api#required": {} } } @@ -9929,7 +9929,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "The state of a serverless cache at a specific point in time, to the millisecond. Available for Redis only.
" + "smithy.api#documentation": "The state of a serverless cache at a specific point in time, to the millisecond. Available for Redis OSS and Serverless Memcached only.
" } } }, @@ -10127,13 +10127,13 @@ "Engine": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The Elasticache engine. For Redis only.
" + "smithy.api#documentation": "The Elasticache engine. For Redis OSS only.
" } }, "EngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The Elasticache Redis engine version.
" + "smithy.api#documentation": "The Elasticache (Redis OSS) engine version.
" } }, "Members": { @@ -10157,19 +10157,19 @@ "AuthTokenEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "A flag that enables using an AuthToken
(password) when issuing Redis\n commands.
Default: false
\n
A flag that enables using an AuthToken
(password) when issuing Redis OSS \n commands.
Default: false
\n
A flag that enables in-transit encryption when set to true.
\n\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6
, 4.x
or\n later.
A flag that enables in-transit encryption when set to true.
\n\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6
, 4.x
or\n later.
A flag that enables encryption at rest when set to true
.
You cannot modify the value of AtRestEncryptionEnabled
after the\n replication group is created. To enable encryption at rest on a replication group you\n must set AtRestEncryptionEnabled
to true
when you create the\n replication group.
\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6
, 4.x
or\n later.
A flag that enables encryption at rest when set to true
.
You cannot modify the value of AtRestEncryptionEnabled
after the\n replication group is created. To enable encryption at rest on a replication group you\n must set AtRestEncryptionEnabled
to true
when you create the\n replication group.
\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6
, 4.x
or\n later.
Dynamically increases the number of replicas in a Redis (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Redis (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.
" + "smithy.api#documentation": "Dynamically increases the number of replicas in a Redis OSS (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Redis OSS (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.
" } }, "com.amazonaws.elasticache#IncreaseReplicaCountMessage": { @@ -10429,13 +10429,13 @@ "NewReplicaCount": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "The number of read replica nodes you want at the completion of this operation. For\n Redis (cluster mode disabled) replication groups, this is the number of replica nodes in\n the replication group. For Redis (cluster mode enabled) replication groups, this is the\n number of replica nodes in each of the replication group's node groups.
" + "smithy.api#documentation": "The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in\n the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the\n number of replica nodes in each of the replication group's node groups.
" } }, "ReplicaConfiguration": { "target": "com.amazonaws.elasticache#ReplicaConfigurationList", "traits": { - "smithy.api#documentation": "A list of ConfigureShard
objects that can be used to configure each\n shard in a Redis (cluster mode enabled) replication group. The\n ConfigureShard
has three members: NewReplicaCount
,\n NodeGroupId
, and PreferredAvailabilityZones
.
A list of ConfigureShard
objects that can be used to configure each\n shard in a Redis OSS (cluster mode enabled) replication group. The\n ConfigureShard
has three members: NewReplicaCount
,\n NodeGroupId
, and PreferredAvailabilityZones
.
The state of the serverless cache snapshot was not received. Available for Redis only.
", + "smithy.api#documentation": "The state of the serverless cache snapshot was not received. Available for Redis OSS and Serverless Memcached only.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -10863,7 +10863,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists all available node types that you can scale your Redis cluster's or replication\n group's current node type.
\nWhen you use the ModifyCacheCluster
or\n ModifyReplicationGroup
operations to scale your cluster or replication\n group, the value of the CacheNodeType
parameter must be one of the node\n types returned by this operation.
Lists all available node types that you can scale your Redis OSS cluster's or replication\n group's current node type.
\nWhen you use the ModifyCacheCluster
or\n ModifyReplicationGroup
operations to scale your cluster or replication\n group, the value of the CacheNodeType
parameter must be one of the node\n types returned by this operation.
The number of cache nodes that the cluster should have. If the value for\n NumCacheNodes
is greater than the sum of the number of current cache\n nodes and the number of cache nodes pending creation (which may be zero), more nodes are\n added. If the value is less than the number of existing cache nodes, nodes are removed.\n If the value is equal to the number of current cache nodes, any pending add or remove\n requests are canceled.
If you are removing cache nodes, you must use the CacheNodeIdsToRemove
\n parameter to provide the IDs of the specific cache nodes to remove.
For clusters running Redis, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.
\nAdding or removing Memcached cache nodes can be applied immediately or as a\n pending operation (see ApplyImmediately
).
A pending operation to modify the number of cache nodes in a cluster during its\n maintenance window, whether by adding or removing nodes in accordance with the scale\n out architecture, is not queued. The customer's latest request to add or remove\n nodes to the cluster overrides any previous pending operations to modify the number\n of cache nodes in the cluster. For example, a request to remove 2 nodes would\n override a previous pending operation to remove 3 nodes. Similarly, a request to add\n 2 nodes would override a previous pending operation to remove 3 nodes and vice\n versa. As Memcached cache nodes may now be provisioned in different Availability\n Zones with flexible cache node placement, a request to add nodes does not\n automatically override a previous pending operation to add nodes. The customer can\n modify the previous pending operation to add more nodes or explicitly cancel the\n pending request and retry the new request. To cancel pending operations to modify\n the number of cache nodes in a cluster, use the ModifyCacheCluster
\n request and set NumCacheNodes
equal to the number of cache nodes\n currently in the cluster.
The number of cache nodes that the cluster should have. If the value for\n NumCacheNodes
is greater than the sum of the number of current cache\n nodes and the number of cache nodes pending creation (which may be zero), more nodes are\n added. If the value is less than the number of existing cache nodes, nodes are removed.\n If the value is equal to the number of current cache nodes, any pending add or remove\n requests are canceled.
If you are removing cache nodes, you must use the CacheNodeIdsToRemove
\n parameter to provide the IDs of the specific cache nodes to remove.
For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.
\nAdding or removing Memcached cache nodes can be applied immediately or as a\n pending operation (see ApplyImmediately
).
A pending operation to modify the number of cache nodes in a cluster during its\n maintenance window, whether by adding or removing nodes in accordance with the scale\n out architecture, is not queued. The customer's latest request to add or remove\n nodes to the cluster overrides any previous pending operations to modify the number\n of cache nodes in the cluster. For example, a request to remove 2 nodes would\n override a previous pending operation to remove 3 nodes. Similarly, a request to add\n 2 nodes would override a previous pending operation to remove 3 nodes and vice\n versa. As Memcached cache nodes may now be provisioned in different Availability\n Zones with flexible cache node placement, a request to add nodes does not\n automatically override a previous pending operation to add nodes. The customer can\n modify the previous pending operation to add more nodes or explicitly cancel the\n pending request and retry the new request. To cancel pending operations to modify\n the number of cache nodes in a cluster, use the ModifyCacheCluster
\n request and set NumCacheNodes
equal to the number of cache nodes\n currently in the cluster.
If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" + "smithy.api#documentation": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" } }, "SnapshotRetentionLimit": { @@ -11383,7 +11383,7 @@ "AuthTokenUpdateStrategy": { "target": "com.amazonaws.elasticache#AuthTokenUpdateStrategyType", "traits": { - "smithy.api#documentation": "Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token
parameter. Possible values:
ROTATE - default, if no update strategy is provided
\nSET - allowed only after ROTATE
\nDELETE - allowed only when transitioning to RBAC
\nFor more information, see Authenticating Users with Redis AUTH\n
" + "smithy.api#documentation": "Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token
parameter. Possible values:
ROTATE - default, if no update strategy is provided
\nSET - allowed only after ROTATE
\nDELETE - allowed only when transitioning to RBAC
\nFor more information, see Authenticating Users with Redis OSS AUTH\n
" } }, "LogDeliveryConfigurations": { @@ -11395,7 +11395,7 @@ "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "The network type you choose when modifying a cluster, either ipv4
|\n ipv6
. IPv6 is supported for workloads using Redis engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
The network type you choose when modifying a cluster, either ipv4
|\n ipv6
. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
Modifies the settings for a replication group. This is limited to Redis 7 and newer.
\n\n Scaling for Amazon ElastiCache for Redis (cluster mode enabled) in\n the ElastiCache User Guide
\n\n ModifyReplicationGroupShardConfiguration in the ElastiCache API\n Reference
\nThis operation is valid for Redis only.
\nModifies the settings for a replication group. This is limited to Redis OSS 7 and newer.
\n\n Scaling for Amazon ElastiCache (Redis OSS) (cluster mode enabled) in\n the ElastiCache User Guide
\n\n ModifyReplicationGroupShardConfiguration in the ElastiCache API\n Reference
\nThis operation is valid for Redis OSS only.
\nThe cluster ID that is used as the daily snapshot source for the replication group.\n This parameter cannot be set for Redis (cluster mode enabled) replication groups.
" + "smithy.api#documentation": "The cluster ID that is used as the daily snapshot source for the replication group.\n This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.
" } }, "AutomaticFailoverEnabled": { @@ -11930,7 +11930,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" + "smithy.api#documentation": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" } }, "SnapshotRetentionLimit": { @@ -11960,7 +11960,7 @@ "AuthTokenUpdateStrategy": { "target": "com.amazonaws.elasticache#AuthTokenUpdateStrategyType", "traits": { - "smithy.api#documentation": "Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token
parameter. Possible values:
ROTATE - default, if no update strategy is provided
\nSET - allowed only after ROTATE
\nDELETE - allowed only when transitioning to RBAC
\nFor more information, see Authenticating Users with Redis AUTH\n
" + "smithy.api#documentation": "Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token
parameter. Possible values:
ROTATE - default, if no update strategy is provided
\nSET - allowed only after ROTATE
\nDELETE - allowed only when transitioning to RBAC
\nFor more information, see Authenticating Users with Redis OSS AUTH\n
" } }, "UserGroupIdsToAdd": { @@ -11990,7 +11990,7 @@ "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "The network type you choose when modifying a cluster, either ipv4
|\n ipv6
. IPv6 is supported for workloads using Redis engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
The network type you choose when modifying a cluster, either ipv4
|\n ipv6
. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.
\nYou must set TransitEncryptionEnabled
to true
, for your\n existing cluster, and set TransitEncryptionMode
to preferred
\n in the same request to allow both encrypted and unencrypted connections at the same\n time. Once you migrate all your Redis clients to use encrypted connections you can set\n the value to required
to allow encrypted connections only.
Setting TransitEncryptionMode
to required
is a two-step\n process that requires you to first set the TransitEncryptionMode
to\n preferred
, after that you can set TransitEncryptionMode
to\n required
.
A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.
\nYou must set TransitEncryptionEnabled
to true
, for your\n existing cluster, and set TransitEncryptionMode
to preferred
\n in the same request to allow both encrypted and unencrypted connections at the same\n time. Once you migrate all your Redis OSS clients to use encrypted connections you can set\n the value to required
to allow encrypted connections only.
Setting TransitEncryptionMode
to required
is a two-step\n process that requires you to first set the TransitEncryptionMode
to\n preferred
, after that you can set TransitEncryptionMode
to\n required
.
Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.
" + "smithy.api#documentation": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.
" } } }, @@ -12079,7 +12079,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The name of the Redis (cluster mode enabled) cluster (replication group) on which the\n shards are to be configured.
", + "smithy.api#documentation": "The name of the Redis OSS (cluster mode enabled) cluster (replication group) on which the\n shards are to be configured.
", "smithy.api#required": {} } }, @@ -12108,13 +12108,13 @@ "NodeGroupsToRemove": { "target": "com.amazonaws.elasticache#NodeGroupsToRemoveList", "traits": { - "smithy.api#documentation": "If the value of NodeGroupCount
is less than the current number of node\n groups (shards), then either NodeGroupsToRemove
or\n NodeGroupsToRetain
is required. NodeGroupsToRemove
is a\n list of NodeGroupId
s to remove from the cluster.
ElastiCache for Redis will attempt to remove all node groups listed by\n NodeGroupsToRemove
from the cluster.
If the value of NodeGroupCount
is less than the current number of node\n groups (shards), then either NodeGroupsToRemove
or\n NodeGroupsToRetain
is required. NodeGroupsToRemove
is a\n list of NodeGroupId
s to remove from the cluster.
ElastiCache (Redis OSS) will attempt to remove all node groups listed by\n NodeGroupsToRemove
from the cluster.
If the value of NodeGroupCount
is less than the current number of node\n groups (shards), then either NodeGroupsToRemove
or\n NodeGroupsToRetain
is required. NodeGroupsToRetain
is a\n list of NodeGroupId
s to retain in the cluster.
ElastiCache for Redis will attempt to remove all node groups except those listed by\n NodeGroupsToRetain
from the cluster.
If the value of NodeGroupCount
is less than the current number of node\n groups (shards), then either NodeGroupsToRemove
or\n NodeGroupsToRetain
is required. NodeGroupsToRetain
is a\n list of NodeGroupId
s to retain in the cluster.
ElastiCache (Redis OSS) will attempt to remove all node groups except those listed by\n NodeGroupsToRetain
from the cluster.
The identifier of the UserGroup to be removed from association with the Redis serverless cache. Available for Redis only. Default is NULL.
" + "smithy.api#documentation": "The identifier of the UserGroup to be removed from association with the Redis OSS serverless cache. Available for Redis OSS only. Default is NULL.
" } }, "UserGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. \n Default is NULL - the existing UserGroup is not removed.
" + "smithy.api#documentation": "The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. \n Default is NULL - the existing UserGroup is not removed.
" } }, "SecurityGroupIds": { @@ -12216,13 +12216,13 @@ "SnapshotRetentionLimit": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "The number of days for which Elasticache retains automatic snapshots before deleting them. \n Available for Redis only.\n Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. \n The maximum value allowed is 35 days.
" + "smithy.api#documentation": "The number of days for which Elasticache retains automatic snapshots before deleting them. \n Available for Redis OSS and Serverless Memcached only.\n Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. \n The maximum value allowed is 35 days.
" } }, "DailySnapshotTime": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis only.\n The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed.
" + "smithy.api#documentation": "The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis OSS and Serverless Memcached only.\n The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed.
" } } }, @@ -12454,7 +12454,7 @@ "NodeGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The identifier for the node group (shard). A Redis (cluster mode disabled) replication\n group contains only 1 node group; therefore, the node group ID is 0001. A Redis (cluster\n mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090.\n Optionally, the user can provide the id for a node group.
" + "smithy.api#documentation": "The identifier for the node group (shard). A Redis OSS (cluster mode disabled) replication\n group contains only 1 node group; therefore, the node group ID is 0001. A Redis OSS (cluster mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090.\n Optionally, the user can provide the id for a node group.
" } }, "Status": { @@ -12498,7 +12498,7 @@ "NodeGroupId": { "target": "com.amazonaws.elasticache#AllowedNodeGroupId", "traits": { - "smithy.api#documentation": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the\n node group these configuration values apply to.
" + "smithy.api#documentation": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the\n node group these configuration values apply to.
" } }, "Slots": { @@ -12578,7 +12578,7 @@ "ReadEndpoint": { "target": "com.amazonaws.elasticache#Endpoint", "traits": { - "smithy.api#documentation": "The information required for client programs to connect to a node for read operations.\n The read endpoint is only applicable on Redis (cluster mode disabled) clusters.
" + "smithy.api#documentation": "The information required for client programs to connect to a node for read operations.\n The read endpoint is only applicable on Redis OSS (cluster mode disabled) clusters.
" } }, "PreferredAvailabilityZone": { @@ -12596,7 +12596,7 @@ "CurrentRole": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The role that is currently assigned to the node - primary
or\n replica
. This member is only applicable for Redis (cluster mode\n disabled) replication groups.
The role that is currently assigned to the node - primary
or\n replica
. This member is only applicable for Redis OSS (cluster mode\n disabled) replication groups.
The new number of cache nodes for the cluster.
\nFor clusters running Redis, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.
" + "smithy.api#documentation": "The new number of cache nodes for the cluster.
\nFor clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.
" } }, "CacheNodeIdsToRemove": { @@ -13232,7 +13232,7 @@ "UpdateActionStatus": { "target": "com.amazonaws.elasticache#UpdateActionStatus", "traits": { - "smithy.api#documentation": "The status of the update action on the Redis cluster
" + "smithy.api#documentation": "The status of the update action on the Redis OSS cluster
" } } }, @@ -13278,7 +13278,7 @@ } ], "traits": { - "smithy.api#documentation": "Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible\n for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis or Managing Costs with\n Reserved Nodes for Memcached.
", + "smithy.api#documentation": "Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible\n for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis OSS or Managing Costs with\n Reserved Nodes for Memcached.
", "smithy.api#examples": [ { "title": "PurchaseReservedCacheNodesOfferings", @@ -13411,7 +13411,7 @@ } ], "traits": { - "smithy.api#documentation": "Reboots some, or all, of the cache nodes within a provisioned cluster. This operation\n applies any modified cache parameter groups to the cluster. The reboot operation takes\n place as soon as possible, and results in a momentary outage to the cluster. During the\n reboot, the cluster status is set to REBOOTING.
\nThe reboot causes the contents of the cache (for each cache node being rebooted) to be\n lost.
\nWhen the reboot is complete, a cluster event is created.
\nRebooting a cluster is currently supported on Memcached and Redis (cluster mode\n disabled) clusters. Rebooting is not supported on Redis (cluster mode enabled)\n clusters.
\nIf you make changes to parameters that require a Redis (cluster mode enabled) cluster\n reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.
", + "smithy.api#documentation": "Reboots some, or all, of the cache nodes within a provisioned cluster. This operation\n applies any modified cache parameter groups to the cluster. The reboot operation takes\n place as soon as possible, and results in a momentary outage to the cluster. During the\n reboot, the cluster status is set to REBOOTING.
\nThe reboot causes the contents of the cache (for each cache node being rebooted) to be\n lost.
\nWhen the reboot is complete, a cluster event is created.
\nRebooting a cluster is currently supported on Memcached and Redis OSS (cluster mode\n disabled) clusters. Rebooting is not supported on Redis OSS (cluster mode enabled)\n clusters.
\nIf you make changes to parameters that require a Redis OSS (cluster mode enabled) cluster\n reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.
", "smithy.api#examples": [ { "title": "RebootCacheCluster", @@ -13742,7 +13742,7 @@ "NodeGroups": { "target": "com.amazonaws.elasticache#NodeGroupList", "traits": { - "smithy.api#documentation": "A list of node groups in this replication group. For Redis (cluster mode disabled)\n replication groups, this is a single-element list. For Redis (cluster mode enabled)\n replication groups, the list contains an entry for each node group (shard).
" + "smithy.api#documentation": "A list of node groups in this replication group. For Redis OSS (cluster mode disabled)\n replication groups, this is a single-element list. For Redis OSS (cluster mode enabled)\n replication groups, the list contains an entry for each node group (shard).
" } }, "SnapshottingClusterId": { @@ -13754,7 +13754,7 @@ "AutomaticFailover": { "target": "com.amazonaws.elasticache#AutomaticFailoverStatus", "traits": { - "smithy.api#documentation": "Indicates the status of automatic failover for this Redis replication group.
" + "smithy.api#documentation": "Indicates the status of automatic failover for this Redis OSS replication group.
" } }, "MultiAZ": { @@ -13796,7 +13796,7 @@ "AuthTokenEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "A flag that enables using an AuthToken
(password) when issuing Redis\n commands.
Default: false
\n
A flag that enables using an AuthToken
(password) when issuing Redis OSS \n commands.
Default: false
\n
A flag that enables in-transit encryption when set to true
.
\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6
, 4.x
or\n later.
Default: false
\n
A flag that enables in-transit encryption when set to true
.
\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6
, 4.x
or\n later.
Default: false
\n
A flag that enables encryption at-rest when set to true
.
You cannot modify the value of AtRestEncryptionEnabled
after the cluster\n is created. To enable encryption at-rest on a cluster you must set\n AtRestEncryptionEnabled
to true
when you create a\n cluster.
\n Required: Only available when creating a replication\n group in an Amazon VPC using redis version 3.2.6
, 4.x
or\n later.
Default: false
\n
A flag that enables encryption at-rest when set to true
.
You cannot modify the value of AtRestEncryptionEnabled
after the cluster\n is created. To enable encryption at-rest on a cluster you must set\n AtRestEncryptionEnabled
to true
when you create a\n cluster.
\n Required: Only available when creating a replication\n group in an Amazon VPC using Redis OSS version 3.2.6
, 4.x
or\n later.
Default: false
\n
If you are running Redis engine version 6.0 or later, set this parameter to yes if you\n want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" + "smithy.api#documentation": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you\n want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" } }, "NetworkType": { "target": "com.amazonaws.elasticache#NetworkType", "traits": { - "smithy.api#documentation": "Must be either ipv4
| ipv6
| dual_stack
. IPv6\n is supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
Must be either ipv4
| ipv6
| dual_stack
. IPv6\n is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
The network type you choose when modifying a cluster, either ipv4
|\n ipv6
. IPv6 is supported for workloads using Redis engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
The network type you choose when modifying a cluster, either ipv4
|\n ipv6
. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.
" + "smithy.api#documentation": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS \n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.
" } } }, "traits": { - "smithy.api#documentation": "Contains all of the attributes of a specific Redis replication group.
" + "smithy.api#documentation": "Contains all of the attributes of a specific Redis OSS replication group.
" } }, "com.amazonaws.elasticache#ReplicationGroupAlreadyExistsFault": { @@ -14025,7 +14025,7 @@ "AutomaticFailoverStatus": { "target": "com.amazonaws.elasticache#PendingAutomaticFailoverStatus", "traits": { - "smithy.api#documentation": "Indicates the status of automatic failover for this Redis replication group.
" + "smithy.api#documentation": "Indicates the status of automatic failover for this Redis OSS replication group.
" } }, "Resharding": { @@ -14067,12 +14067,12 @@ "ClusterMode": { "target": "com.amazonaws.elasticache#ClusterMode", "traits": { - "smithy.api#documentation": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.
" + "smithy.api#documentation": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.
" } } }, "traits": { - "smithy.api#documentation": "The settings to be applied to the Redis replication group, either immediately or\n during the next maintenance window.
" + "smithy.api#documentation": "The settings to be applied to the Redis OSS replication group, either immediately or\n during the next maintenance window.
" } }, "com.amazonaws.elasticache#ReservedCacheNode": { @@ -14093,7 +14093,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The cache node type for the reserved cache nodes.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis configuration variables appendonly
and\n appendfsync
are not supported on Redis version 2.8.22 and\n later.
The cache node type for the reserved cache nodes.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis OSS append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis OSS Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis OSS configuration variables appendonly
and\n appendfsync
are not supported on Redis OSS version 2.8.22 and\n later.
The cache node type for the reserved cache node.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis configuration variables appendonly
and\n appendfsync
are not supported on Redis version 2.8.22 and\n later.
The cache node type for the reserved cache node.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis OSS append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis OSS Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis OSS configuration variables appendonly
and\n appendfsync
are not supported on Redis OSS version 2.8.22 and\n later.
Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the\n node group these configuration values apply to.
" + "smithy.api#documentation": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the\n node group these configuration values apply to.
" } }, "PreferredAvailabilityZones": { @@ -14656,7 +14656,7 @@ "UserGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL.
" + "smithy.api#documentation": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.
" } }, "SubnetIds": { @@ -14668,13 +14668,13 @@ "SnapshotRetentionLimit": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis only.
" + "smithy.api#documentation": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.
" } }, "DailySnapshotTime": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a\n specific time on a daily basis. Available for Redis only.
" + "smithy.api#documentation": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a\n specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.
" } } }, @@ -14771,60 +14771,60 @@ "ServerlessCacheSnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The identifier of a serverless cache snapshot. Available for Redis only.
" + "smithy.api#documentation": "The identifier of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.
" } }, "ARN": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Redis only.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.
" } }, "KmsKeyId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Redis only.
" + "smithy.api#documentation": "The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.
" } }, "SnapshotType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The type of snapshot of serverless cache. Available for Redis only.
" + "smithy.api#documentation": "The type of snapshot of serverless cache. Available for Redis OSS and Serverless Memcached only.
" } }, "Status": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The current status of the serverless cache. Available for Redis only.
" + "smithy.api#documentation": "The current status of the serverless cache. Available for Redis OSS and Serverless Memcached only.
" } }, "CreateTime": { "target": "com.amazonaws.elasticache#TStamp", "traits": { - "smithy.api#documentation": "The date and time that the source serverless cache's metadata and cache data set was obtained for\n the snapshot. Available for Redis only.
" + "smithy.api#documentation": "The date and time that the source serverless cache's metadata and cache data set was obtained for\n the snapshot. Available for Redis OSS and Serverless Memcached only.
" } }, "ExpiryTime": { "target": "com.amazonaws.elasticache#TStamp", "traits": { - "smithy.api#documentation": "The time that the serverless cache snapshot will expire. Available for Redis only.
" + "smithy.api#documentation": "The time that the serverless cache snapshot will expire. Available for Redis OSS and Serverless Memcached only.
" } }, "BytesUsedForCache": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The total size of a serverless cache snapshot, in bytes. Available for Redis only.
" + "smithy.api#documentation": "The total size of a serverless cache snapshot, in bytes. Available for Redis OSS and Serverless Memcached only.
" } }, "ServerlessCacheConfiguration": { "target": "com.amazonaws.elasticache#ServerlessCacheConfiguration", "traits": { - "smithy.api#documentation": "The configuration of the serverless cache, at the time the snapshot was taken. Available for Redis only.
" + "smithy.api#documentation": "The configuration of the serverless cache, at the time the snapshot was taken. Available for Redis OSS and Serverless Memcached only.
" } } }, "traits": { - "smithy.api#documentation": "The resource representing a serverless cache snapshot. Available for Redis only.
" + "smithy.api#documentation": "The resource representing a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.
" } }, "com.amazonaws.elasticache#ServerlessCacheSnapshotAlreadyExistsFault": { @@ -14839,7 +14839,7 @@ "code": "ServerlessCacheSnapshotAlreadyExistsFault", "httpResponseCode": 400 }, - "smithy.api#documentation": "A serverless cache snapshot with this name already exists. Available for Redis only.
", + "smithy.api#documentation": "A serverless cache snapshot with this name already exists. Available for Redis OSS and Serverless Memcached only.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -14865,7 +14865,7 @@ "code": "ServerlessCacheSnapshotNotFoundFault", "httpResponseCode": 404 }, - "smithy.api#documentation": "This serverless cache snapshot could not be found or does not exist. Available for Redis only.
", + "smithy.api#documentation": "This serverless cache snapshot could not be found or does not exist. Available for Redis OSS and Serverless Memcached only.
", "smithy.api#error": "client", "smithy.api#httpError": 404 } @@ -14882,7 +14882,7 @@ "code": "ServerlessCacheSnapshotQuotaExceededFault", "httpResponseCode": 400 }, - "smithy.api#documentation": "The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Redis only.
", + "smithy.api#documentation": "The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Redis OSS and Serverless Memcached only.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -14958,13 +14958,13 @@ "Engine": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The Elasticache engine to which the update applies. Either Redis or Memcached
" + "smithy.api#documentation": "The Elasticache engine to which the update applies. Either Redis OSS or Memcached.
" } }, "EngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The Elasticache engine version to which the update applies. Either Redis or Memcached\n engine version
" + "smithy.api#documentation": "The Elasticache engine version to which the update applies. Either Redis OSS or Memcached\n engine version.
" } }, "AutoUpdateAfterRecommendedApplyByDate": { @@ -14981,7 +14981,7 @@ } }, "traits": { - "smithy.api#documentation": "An update that you can apply to your Redis clusters.
" + "smithy.api#documentation": "An update that you can apply to your Redis OSS clusters.
" } }, "com.amazonaws.elasticache#ServiceUpdateList": { @@ -15184,7 +15184,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The name of the compute and memory capacity node type for the source cluster.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis configuration variables appendonly
and\n appendfsync
are not supported on Redis version 2.8.22 and\n later.
The name of the compute and memory capacity node type for the source cluster.
\nThe following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.
\nGeneral purpose:
\nCurrent generation:
\n\n M7g node types:\n \t\t\t\t\tcache.m7g.large
,\n \t\t\t\t\tcache.m7g.xlarge
,\n \t\t\t\t\tcache.m7g.2xlarge
,\n \t\t\t\t\tcache.m7g.4xlarge
,\n \t\t\t\t\tcache.m7g.8xlarge
,\n \t\t\t\t\tcache.m7g.12xlarge
,\n \t\t\t\t\tcache.m7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n
\n M5 node types:\n cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n
\n M4 node types:\n cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n
\n T3 node types:\n cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n T1 node types:\n cache.t1.micro
\n
\n M1 node types:\n cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\nPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n C1 node types:\n cache.c1.xlarge
\n
Memory optimized:
\nCurrent generation:
\n\n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large
,\n\t\t\t\t\t\t\tcache.r7g.xlarge
,\n\t\t\t\t\t\t\tcache.r7g.2xlarge
,\n\t\t\t\t\t\t\tcache.r7g.4xlarge
,\n\t\t\t\t\t\t\tcache.r7g.8xlarge
,\n\t\t\t\t\t\t\tcache.r7g.12xlarge
,\n\t\t\t\t\t\t\tcache.r7g.16xlarge
\n
For region availability, see Supported Node Types\n
\n\n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n
\n R5 node types:\n cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\n cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\nAll current generation instance types are created in Amazon VPC by\n default.
\nRedis OSS append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis OSS Multi-AZ with automatic failover is not supported on T1\n instances.
\nRedis OSS configuration variables appendonly
and\n appendfsync
are not supported on Redis OSS version 2.8.22 and\n later.
The number of cache nodes in the source cluster.
\nFor clusters running Redis, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.
" + "smithy.api#documentation": "The number of cache nodes in the source cluster.
\nFor clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.
" } }, "PreferredAvailabilityZone": { @@ -15262,7 +15262,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#Boolean", "traits": { - "smithy.api#documentation": "If you are running Redis engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" + "smithy.api#documentation": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.
" } }, "SnapshotRetentionLimit": { @@ -15286,7 +15286,7 @@ "AutomaticFailover": { "target": "com.amazonaws.elasticache#AutomaticFailoverStatus", "traits": { - "smithy.api#documentation": "Indicates the status of automatic failover for the source Redis replication\n group.
" + "smithy.api#documentation": "Indicates the status of automatic failover for the source Redis OSS replication\n group.
" } }, "NodeSnapshots": { @@ -15315,7 +15315,7 @@ } }, "traits": { - "smithy.api#documentation": "Represents a copy of an entire Redis cluster as of the time when the snapshot was\n taken.
" + "smithy.api#documentation": "Represents a copy of an entire Redis OSS cluster as of the time when the snapshot was\n taken.
" } }, "com.amazonaws.elasticache#SnapshotAlreadyExistsFault": { @@ -15356,7 +15356,7 @@ "code": "SnapshotFeatureNotSupportedFault", "httpResponseCode": 400 }, - "smithy.api#documentation": "You attempted one of the following operations:
\nCreating a snapshot of a Redis cluster running on a\n cache.t1.micro
cache node.
Creating a snapshot of a cluster that is running Memcached rather than\n Redis.
\nNeither of these are supported by ElastiCache.
", + "smithy.api#documentation": "You attempted one of the following operations:
\nCreating a snapshot of a Redis OSS cluster running on a\n cache.t1.micro
cache node.
Creating a snapshot of a cluster that is running Memcached rather than\n Redis OSS.
\nNeither of these are supported by ElastiCache.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -15504,7 +15504,7 @@ "target": "com.amazonaws.elasticache#CustomerNodeEndpointList", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "List of endpoints from which data should be migrated. For Redis (cluster mode\n disabled), list should have only one element.
", + "smithy.api#documentation": "List of endpoints from which data should be migrated. For Redis OSS (cluster mode\n disabled), list should have only one element.
", "smithy.api#required": {} } } @@ -15551,7 +15551,7 @@ "SupportedNetworkTypes": { "target": "com.amazonaws.elasticache#NetworkTypeList", "traits": { - "smithy.api#documentation": "Either ipv4
| ipv6
| dual_stack
. IPv6 is\n supported for workloads using Redis engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
Either ipv4
| ipv6
| dual_stack
. IPv6 is\n supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.
Represents the input of a TestFailover
operation which tests automatic\n failover on a specified node group (called shard in the console) in a replication group\n (called cluster in the console).
This API is designed for testing the behavior of your application in case of\n ElastiCache failover. It is not designed to be an operational tool for initiating a\n failover to overcome a problem you may have with the cluster. Moreover, in certain\n conditions such as large-scale operational events, Amazon may block this API.
\n\n Note the following\n
\nA customer can use this operation to test automatic failover on up to 15 shards\n (called node groups in the ElastiCache API and Amazon CLI) in any rolling\n 24-hour period.
\nIf calling this operation on shards in different clusters (called replication\n groups in the API and CLI), the calls can be made concurrently.
\n\n
If calling this operation multiple times on different shards in the same Redis\n (cluster mode enabled) replication group, the first node replacement must\n complete before a subsequent call can be made.
\nTo determine whether the node replacement is complete you can check Events\n using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API.\n Look for the following automatic failover related events, listed here in order\n of occurrance:
\nReplication group message: Test Failover API called for node\n group
\n
Cache cluster message: Failover from primary node\n
\n
Replication group message: Failover from primary node\n
\n
Cache cluster message: Recovering cache nodes\n
\n
Cache cluster message: Finished recovery for cache nodes\n
\n
For more information see:
\n\n Viewing\n ElastiCache Events in the ElastiCache User\n Guide\n
\n\n DescribeEvents in the ElastiCache API Reference
\nAlso see, Testing\n Multi-AZ in the ElastiCache User Guide.
" + "smithy.api#documentation": "Represents the input of a TestFailover
operation which tests automatic\n failover on a specified node group (called shard in the console) in a replication group\n (called cluster in the console).
This API is designed for testing the behavior of your application in case of\n ElastiCache failover. It is not designed to be an operational tool for initiating a\n failover to overcome a problem you may have with the cluster. Moreover, in certain\n conditions such as large-scale operational events, Amazon may block this API.
\n\n Note the following\n
\nA customer can use this operation to test automatic failover on up to 15 shards\n (called node groups in the ElastiCache API and Amazon CLI) in any rolling\n 24-hour period.
\nIf calling this operation on shards in different clusters (called replication\n groups in the API and CLI), the calls can be made concurrently.
\n\n
If calling this operation multiple times on different shards in the same Redis OSS (cluster mode enabled) replication group, the first node replacement must\n complete before a subsequent call can be made.
\nTo determine whether the node replacement is complete you can check Events\n using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API.\n Look for the following automatic failover related events, listed here in order\n of occurrance:
\nReplication group message: Test Failover API called for node\n group
\n
Cache cluster message: Failover from primary node\n
\n
Replication group message: Failover from primary node\n
\n
Cache cluster message: Recovering cache nodes\n
\n
Cache cluster message: Finished recovery for cache nodes\n
\n
For more information see:
\n\n Viewing\n ElastiCache Events in the ElastiCache User\n Guide\n
\n\n DescribeEvents in the ElastiCache API Reference
\nAlso see, Testing\n Multi-AZ in the ElastiCache User Guide.
" } }, "com.amazonaws.elasticache#TestFailoverMessage": { @@ -16066,7 +16066,7 @@ "Engine": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The Elasticache engine to which the update applies. Either Redis or Memcached
" + "smithy.api#documentation": "The Elasticache engine to which the update applies. Either Redis OSS or Memcached.
" } } }, @@ -16221,7 +16221,7 @@ "MinimumEngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The minimum engine version required, which is Redis 6.0
" + "smithy.api#documentation": "The minimum engine version required, which is Redis OSS 6.0
" } }, "AccessString": { @@ -16285,7 +16285,7 @@ "Engine": { "target": "com.amazonaws.elasticache#EngineType", "traits": { - "smithy.api#documentation": "The current supported value is Redis.
" + "smithy.api#documentation": "The current supported value is Redis user.
" } }, "UserIds": { @@ -16297,7 +16297,7 @@ "MinimumEngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The minimum engine version required, which is Redis 6.0
" + "smithy.api#documentation": "The minimum engine version required, which is Redis OSS 6.0
" } }, "PendingChanges": { @@ -16315,7 +16315,7 @@ "ServerlessCaches": { "target": "com.amazonaws.elasticache#UGServerlessCacheIdList", "traits": { - "smithy.api#documentation": "Indicates which serverless caches the specified user group is associated with. Available for Redis only.
" + "smithy.api#documentation": "Indicates which serverless caches the specified user group is associated with. Available for Redis OSS and Serverless Memcached only.
" } }, "ARN": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index de4927d664..4c24f2afa5 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -1640,6 +1640,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -1649,6 +1650,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2126,12 +2128,24 @@ }, "ca-central-1" : { "variants" : [ { + "hostname" : "athena-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "athena.ca-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" : [ { + "hostname" : "athena-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "athena.ca-west-1.api.aws", "tags" : [ "dualstack" ] } ] @@ -2184,6 +2198,20 @@ "tags" : [ "dualstack" ] } ] }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "athena-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "athena-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -5367,12 +5395,6 @@ } ] }, "endpoints" : { - "af-south-1" : { - "hostname" : "datazone.af-south-1.api.aws" - }, - "ap-east-1" : { - "hostname" : "datazone.ap-east-1.api.aws" - }, "ap-northeast-1" : { "hostname" : "datazone.ap-northeast-1.api.aws" }, @@ -5382,9 +5404,6 @@ "ap-northeast-3" : { "hostname" : "datazone.ap-northeast-3.api.aws" }, - "ap-south-1" : { - "hostname" : 
"datazone.ap-south-1.api.aws" - }, "ap-south-2" : { "hostname" : "datazone.ap-south-2.api.aws" }, @@ -5413,18 +5432,12 @@ "eu-central-1" : { "hostname" : "datazone.eu-central-1.api.aws" }, - "eu-central-2" : { - "hostname" : "datazone.eu-central-2.api.aws" - }, "eu-north-1" : { "hostname" : "datazone.eu-north-1.api.aws" }, "eu-south-1" : { "hostname" : "datazone.eu-south-1.api.aws" }, - "eu-south-2" : { - "hostname" : "datazone.eu-south-2.api.aws" - }, "eu-west-1" : { "hostname" : "datazone.eu-west-1.api.aws" }, @@ -5479,6 +5492,8 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -10811,8 +10826,18 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -10821,14 +10846,76 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.ca-west-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : 
{ + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-west-2.amazonaws.com" + }, "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "kinesisvideo" : { @@ -19056,6 +19143,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -20348,6 +20436,7 @@ }, "vpc-lattice" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -20356,6 +20445,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -23710,8 +23800,6 @@ }, "endpoints" : { "us-gov-east-1" : { - "hostname" : "autoscaling-plans.us-gov-east-1.amazonaws.com", - "protocols" : [ "http", "https" ], "variants" : [ { "hostname" : "autoscaling-plans.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] @@ -23723,8 +23811,6 @@ "protocols" : [ "http", "https" ] }, "us-gov-west-1" : { - "hostname" : "autoscaling-plans.us-gov-west-1.amazonaws.com", - "protocols" : [ "http", "https" ], "variants" : [ { "hostname" : 
"autoscaling-plans.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] @@ -25924,8 +26010,32 @@ }, "kinesisanalytics" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "kinesisvideo" : { @@ -26438,6 +26548,12 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-us-gov-global" }, + "oam" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "oidc" : { "endpoints" : { "us-gov-east-1" : { @@ -28329,7 +28445,8 @@ }, "apigateway" : { "endpoints" : { - "us-iso-east-1" : { } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "appconfig" : { @@ -29205,6 +29322,11 @@ "us-isob-east-1" : { } } }, + "apigateway" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "appconfig" : { "endpoints" : { "us-isob-east-1" : { } diff --git a/models/entityresolution.json b/models/entityresolution.json index e72464b596..692e3eb224 100644 --- a/models/entityresolution.json +++ b/models/entityresolution.json @@ -819,7 +819,7 @@ } }, "traits": { - "smithy.api#documentation": "You do not have sufficient access to perform this action. HTTP Status Code:\n 403
\n
You do not have sufficient access to perform this action.
", "smithy.api#error": "client", "smithy.api#httpError": 403 } @@ -884,7 +884,7 @@ "effect": { "target": "com.amazonaws.entityresolution#StatementEffect", "traits": { - "smithy.api#documentation": "Determines whether the permissions specified in the policy are to be allowed\n (Allow
) or denied (Deny
).
Determines whether the permissions specified in the policy are to be allowed\n (Allow
) or denied (Deny
).
If you set the value of the effect
parameter to Deny
for\n the AddPolicyStatement
operation, you must also set the value of the\n effect
parameter in the policy
to Deny
for the\n PutPolicy
operation.
The request could not be processed because of conflict in the current state of the\n resource. Example: Workflow already exists, Schema already exists, Workflow is currently\n running, etc. HTTP Status Code: 400
\n
The request could not be processed because of conflict in the current state of the\n resource. Example: Workflow already exists, Schema already exists, Workflow is currently\n running, etc.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -1157,15 +1157,16 @@ "idMappingTechniques": { "target": "com.amazonaws.entityresolution#IdMappingTechniques", "traits": { - "smithy.api#documentation": "An object which defines the idMappingType
and the\n providerProperties
.
An object which defines the ID mapping technique and any additional\n configurations.
", "smithy.api#required": {} } }, "roleArn": { - "target": "com.amazonaws.entityresolution#RoleArn", + "target": "com.amazonaws.entityresolution#IdMappingRoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to create resources on your behalf as part of workflow execution.
", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to create resources on your behalf as part of workflow execution.
" } }, "tags": { @@ -1218,15 +1219,16 @@ "idMappingTechniques": { "target": "com.amazonaws.entityresolution#IdMappingTechniques", "traits": { - "smithy.api#documentation": "An object which defines the idMappingType
and the\n providerProperties
.
An object which defines the ID mapping technique and any additional\n configurations.
", "smithy.api#required": {} } }, "roleArn": { - "target": "com.amazonaws.entityresolution#RoleArn", + "target": "com.amazonaws.entityresolution#IdMappingRoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to create resources on your behalf as part of workflow execution.
", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to create resources on your behalf as part of workflow execution.
" } } }, @@ -2176,7 +2178,7 @@ } }, "traits": { - "smithy.api#documentation": "The request was rejected because it attempted to create resources beyond the current\n Entity Resolution account limits. The error message describes the limit exceeded.\n HTTP Status Code: 402
\n
The request was rejected because it attempted to create resources beyond the current\n Entity Resolution account limits. The error message describes the limit exceeded.\n
", "smithy.api#error": "client", "smithy.api#httpError": 402 } @@ -2380,7 +2382,7 @@ "idMappingTechniques": { "target": "com.amazonaws.entityresolution#IdMappingTechniques", "traits": { - "smithy.api#documentation": "An object which defines the idMappingType
and the\n providerProperties
.
An object which defines the ID mapping technique and any additional\n configurations.
", "smithy.api#required": {} } }, @@ -2399,10 +2401,11 @@ } }, "roleArn": { - "target": "com.amazonaws.entityresolution#RoleArn", + "target": "com.amazonaws.entityresolution#IdMappingRoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.
", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.
" } }, "tags": { @@ -3221,13 +3224,13 @@ "inputRecords": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "The total number of input records.
" + "smithy.api#documentation": "The total number of records that were input for processing.
" } }, "totalRecordsProcessed": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "The total number of records processed.
" + "smithy.api#documentation": "The total number of records that were processed.
" } }, "recordsNotProcessed": { @@ -3235,10 +3238,28 @@ "traits": { "smithy.api#documentation": "The total number of records that did not get processed.
" } + }, + "totalMappedRecords": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "The total number of records that were mapped.
" + } + }, + "totalMappedSourceRecords": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "The total number of mapped source records.
" + } + }, + "totalMappedTargetRecords": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "The total number of distinct mapped target records.
" + } } }, "traits": { - "smithy.api#documentation": "An object containing InputRecords
, TotalRecordsProcessed
,\n MatchIDs
, and RecordsNotProcessed
.
An object containing InputRecords
, RecordsNotProcessed
,\n TotalRecordsProcessed
, TotalMappedRecords
,\n TotalMappedSourceRecords
, and TotalMappedTargetRecords
.
The rules that can be used for ID mapping.
", + "smithy.api#length": { + "min": 1, + "max": 25 + } + } + }, + "ruleDefinitionType": { + "target": "com.amazonaws.entityresolution#IdMappingWorkflowRuleDefinitionType", + "traits": { + "smithy.api#documentation": "The set of rules you can use in an ID mapping workflow. The limitations specified for\n the source or target to define the match rules must be compatible.
", + "smithy.api#required": {} + } + }, + "attributeMatchingModel": { + "target": "com.amazonaws.entityresolution#AttributeMatchingModel", + "traits": { + "smithy.api#documentation": "The comparison type. You can either choose ONE_TO_ONE
or\n MANY_TO_MANY
as the attributeMatchingModel
.
If you choose MANY_TO_MANY
, the system can match attributes across the\n sub-types of an attribute type. For example, if the value of the Email
field\n of Profile A matches the value of the BusinessEmail
field of Profile B, the\n two profiles are matched on the Email
attribute type.
If you choose ONE_TO_ONE
, the system can only match attributes if the\n sub-types are an exact match. For example, for the Email
attribute type, the\n system will only consider it a match if the value of the Email
field of\n Profile A matches the value of the Email
field of Profile B.
The type of matching record that is allowed to be used in an ID mapping workflow.
\nIf the value is set to ONE_SOURCE_TO_ONE_TARGET
, only one record in the\n source can be matched to the same record in the target.
If the value is set to MANY_SOURCE_TO_ONE_TARGET
, multiple records in the\n source can be matched to one record in the target.
An object that defines the list of matching rules to run in an ID mapping\n workflow.
" + } + }, "com.amazonaws.entityresolution#IdMappingTechniques": { "type": "structure", "members": { @@ -3291,6 +3361,12 @@ "smithy.api#required": {} } }, + "ruleBasedProperties": { + "target": "com.amazonaws.entityresolution#IdMappingRuleBasedProperties", + "traits": { + "smithy.api#documentation": "An object which defines any additional configurations required by rule-based\n matching.
" + } + }, "providerProperties": { "target": "com.amazonaws.entityresolution#ProviderProperties", "traits": { @@ -3299,7 +3375,7 @@ } }, "traits": { - "smithy.api#documentation": "An object which defines the ID mapping techniques and provider configurations.
" + "smithy.api#documentation": "An object which defines the ID mapping technique and any additional\n configurations.
" } }, "com.amazonaws.entityresolution#IdMappingType": { @@ -3310,6 +3386,12 @@ "traits": { "smithy.api#enumValue": "PROVIDER" } + }, + "RULE_BASED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RULE_BASED" + } } } }, @@ -3325,7 +3407,7 @@ "inputSourceARN": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "An Glue table ARN for the input source table.
", + "smithy.api#documentation": "An Glue table Amazon Resource Name (ARN) or a matching workflow ARN for\n the input source table.
", "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})$", "smithy.api#required": {} } @@ -3339,7 +3421,7 @@ "type": { "target": "com.amazonaws.entityresolution#IdNamespaceType", "traits": { - "smithy.api#documentation": "The type of ID namespace. There are two types: SOURCE
and\n TARGET
.
The SOURCE
contains configurations for sourceId
data that will\n be processed in an ID mapping workflow.
The TARGET
contains a configuration of targetId
to which all\n sourceIds
will resolve to.
The type of ID namespace. There are two types: SOURCE
and\n TARGET
.
The SOURCE
contains configurations for sourceId
data that will\n be processed in an ID mapping workflow.
The TARGET
contains a configuration of targetId
which all\n sourceIds
will resolve to.
The type of ID mapping.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The settings for the ID namespace for the ID mapping workflow job.
" + } + }, + "com.amazonaws.entityresolution#IdNamespaceIdMappingWorkflowMetadataList": { + "type": "list", + "member": { + "target": "com.amazonaws.entityresolution#IdNamespaceIdMappingWorkflowMetadata" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, "com.amazonaws.entityresolution#IdNamespaceIdMappingWorkflowProperties": { "type": "structure", "members": { @@ -3450,6 +3582,12 @@ "smithy.api#required": {} } }, + "ruleBasedProperties": { + "target": "com.amazonaws.entityresolution#NamespaceRuleBasedProperties", + "traits": { + "smithy.api#documentation": "An object which defines any additional configurations required by rule-based\n matching.
" + } + }, "providerProperties": { "target": "com.amazonaws.entityresolution#NamespaceProviderProperties", "traits": { @@ -3458,7 +3596,7 @@ } }, "traits": { - "smithy.api#documentation": "An object containing IdMappingType
and\n ProviderProperties
.
An object containing IdMappingType
, ProviderProperties
, and\n RuleBasedProperties
.
An Glue table ARN for the input source table.
", + "smithy.api#documentation": "An Glue table Amazon Resource Name (ARN) or a matching workflow ARN for\n the input source table.
", "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})$", "smithy.api#required": {} } @@ -3536,10 +3674,16 @@ "smithy.api#documentation": "The description of the ID namespace.
" } }, + "idMappingWorkflowProperties": { + "target": "com.amazonaws.entityresolution#IdNamespaceIdMappingWorkflowMetadataList", + "traits": { + "smithy.api#documentation": "An object which defines any additional configurations required by the ID mapping\n workflow.
" + } + }, "type": { "target": "com.amazonaws.entityresolution#IdNamespaceType", "traits": { - "smithy.api#documentation": "The type of ID namespace. There are two types: SOURCE
and\n TARGET
.
The SOURCE
contains configurations for sourceId
data that will\n be processed in an ID mapping workflow.
The TARGET
contains a configuration of targetId
to which all\n sourceIds
will resolve to.
The type of ID namespace. There are two types: SOURCE
and\n TARGET
.
The SOURCE
contains configurations for sourceId
data that will\n be processed in an ID mapping workflow.
The TARGET
contains a configuration of targetId
which all\n sourceIds
will resolve to.
An Glue table ARN for the input source table.
", + "smithy.api#documentation": "An Glue table Amazon Resource Name (ARN) for the input source\n table.
", "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})$", "smithy.api#required": {} } @@ -3668,7 +3812,7 @@ } }, "traits": { - "smithy.api#documentation": "This exception occurs when there is an internal failure in the Entity Resolution\n service. HTTP Status Code: 500
\n
This exception occurs when there is an internal failure in the Entity Resolution\n service.
", "smithy.api#error": "server", "smithy.api#httpError": 500, "smithy.api#retryable": {} @@ -4500,6 +4644,23 @@ "smithy.api#output": {} } }, + "com.amazonaws.entityresolution#MatchPurpose": { + "type": "enum", + "members": { + "IDENTIFIER_GENERATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IDENTIFIER_GENERATION" + } + }, + "INDEXING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INDEXING" + } + } + } + }, "com.amazonaws.entityresolution#MatchingKeys": { "type": "list", "member": { @@ -4582,6 +4743,42 @@ "smithy.api#documentation": "An object containing ProviderConfiguration
and\n ProviderServiceArn
.
The rules for the ID namespace.
", + "smithy.api#length": { + "min": 1, + "max": 25 + } + } + }, + "ruleDefinitionTypes": { + "target": "com.amazonaws.entityresolution#IdMappingWorkflowRuleDefinitionTypeList", + "traits": { + "smithy.api#documentation": "The sets of rules you can use in an ID mapping workflow. The limitations specified for\n the source and target must be compatible.
" + } + }, + "attributeMatchingModel": { + "target": "com.amazonaws.entityresolution#AttributeMatchingModel", + "traits": { + "smithy.api#documentation": "The comparison type. You can either choose ONE_TO_ONE
or\n MANY_TO_MANY
as the attributeMatchingModel
.
If you choose MANY_TO_MANY
, the system can match attributes across the\n sub-types of an attribute type. For example, if the value of the Email
field\n of Profile A matches the value of BusinessEmail
field of Profile B, the two\n profiles are matched on the Email
attribute type.
If you choose ONE_TO_ONE
, the system can only match attributes if the\n sub-types are an exact match. For example, for the Email
attribute type, the\n system will only consider it a match if the value of the Email
field of\n Profile A matches the value of the Email
field of Profile B.
The type of matching record that is allowed to be used in an ID mapping workflow.
\nIf the value is set to ONE_SOURCE_TO_ONE_TARGET
, only one record in the\n source is matched to one record in the target.
If the value is set to MANY_SOURCE_TO_ONE_TARGET
, all matching records in\n the source are matched to one record in the target.
The rule-based properties of an ID namespace. These properties define how the ID\n namespace can be used in an ID mapping workflow.
" + } + }, "com.amazonaws.entityresolution#NextToken": { "type": "string", "traits": { @@ -4733,7 +4930,7 @@ "providerTargetConfigurationDefinition": { "target": "smithy.api#Document", "traits": { - "smithy.api#documentation": "Configurations required for the target ID namespace.
" + "smithy.api#documentation": "Configurations required for the target ID namespace.
" } }, "providerSourceConfigurationDefinition": { @@ -4995,7 +5192,7 @@ "policy": { "target": "com.amazonaws.entityresolution#PolicyDocument", "traits": { - "smithy.api#documentation": "The resource-based policy.
", + "smithy.api#documentation": "The resource-based policy.
\nIf you set the value of the effect
parameter in the policy
\n to Deny
for the PutPolicy
operation, you must also set the\n value of the effect
parameter to Deny
for the\n AddPolicyStatement
operation.
The resource could not be found. HTTP Status Code: 404
\n
The resource could not be found.
", "smithy.api#error": "client", "smithy.api#httpError": 404 } @@ -5182,13 +5402,19 @@ "attributeMatchingModel": { "target": "com.amazonaws.entityresolution#AttributeMatchingModel", "traits": { - "smithy.api#documentation": "The comparison type. You can either choose ONE_TO_ONE
or\n MANY_TO_MANY
as the AttributeMatchingModel. When choosing\n MANY_TO_MANY
, the system can match attributes across the sub-types of an\n attribute type. For example, if the value of the Email
field of Profile A and\n the value of BusinessEmail
field of Profile B matches, the two profiles are\n matched on the Email
type. When choosing ONE_TO_ONE
,the system\n can only match if the sub-types are exact matches. For example, only when the value of the\n Email
field of Profile A and the value of the Email
field of\n Profile B matches, the two profiles are matched on the Email
type.
The comparison type. You can either choose ONE_TO_ONE
or\n MANY_TO_MANY
as the attributeMatchingModel
.
If you choose MANY_TO_MANY
, the system can match attributes across the\n sub-types of an attribute type. For example, if the value of the Email
field\n of Profile A and the value of BusinessEmail
field of Profile B matches, the\n two profiles are matched on the Email
attribute type.
If you choose ONE_TO_ONE
, the system can only match attributes if the\n sub-types are an exact match. For example, for the Email
attribute type, the\n system will only consider it a match if the value of the Email
field of\n Profile A matches the value of the Email
field of Profile B.
An indicator of whether to generate IDs and index the data or not.
\nIf you choose IDENTIFIER_GENERATION
, the process generates IDs and indexes\n the data.
If you choose INDEXING
, the process indexes the data without generating\n IDs.
An object which defines the list of matching rules to run and has a field\n Rules
, which is a list of rule objects.
An object which defines the list of matching rules to run in a matching workflow.\n RuleBasedProperties contain a Rules
field, which is a list of rule\n objects.
A key that allows grouping of multiple input attributes into a unified matching group.\n For example, consider a scenario where the source table contains various addresses, such as\n business_address
and shipping_address
. By assigning a\n matchKey
called address
to both attributes, Entity Resolution\n will match records across these fields to create a consolidated matching group. If no\n matchKey
is specified for a column, it won't be utilized for matching\n purposes but will still be included in the output table.
A key that allows grouping of multiple input attributes into a unified matching group.
\nFor example, consider a scenario where the source table contains various addresses, such\n as business_address
and shipping_address
. By assigning a\n matchKey
called address
to both attributes, Entity Resolution\n will match records across these fields to create a consolidated matching group.
If no matchKey
is specified for a column, it won't be utilized for matching\n purposes but will still be included in the output table.
The subtype of the attribute, selected from a list of values.
" } + }, + "hashed": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": " Indicates if the column values are hashed in the schema input. If the value is set to\n TRUE
, the column values are hashed. If the value is set to\n FALSE
, the column values are cleartext.
An object containing FieldName
, Type
, GroupName
,\n MatchKey
, and SubType
.
An object containing FieldName
, Type
, GroupName
,\n MatchKey
, Hashing
, and SubType
.
The request was denied due to request throttling. HTTP Status Code:\n 429
\n
The request was denied due to request throttling.
", "smithy.api#error": "client", "smithy.api#httpError": 429, "smithy.api#retryable": { @@ -5825,7 +6057,7 @@ "min": 1, "max": 760 }, - "smithy.api#pattern": "^[a-zA-Z_0-9-,]*$" + "smithy.api#pattern": "^[a-zA-Z_0-9-+=/,]*$" } }, "com.amazonaws.entityresolution#UniqueIdList": { @@ -5958,15 +6190,16 @@ "idMappingTechniques": { "target": "com.amazonaws.entityresolution#IdMappingTechniques", "traits": { - "smithy.api#documentation": "An object which defines the idMappingType
and the\n providerProperties
.
An object which defines the ID mapping technique and any additional\n configurations.
", "smithy.api#required": {} } }, "roleArn": { - "target": "com.amazonaws.entityresolution#RoleArn", + "target": "com.amazonaws.entityresolution#IdMappingRoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.
", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.
" } } }, @@ -6013,15 +6246,16 @@ "idMappingTechniques": { "target": "com.amazonaws.entityresolution#IdMappingTechniques", "traits": { - "smithy.api#documentation": "An object which defines the idMappingType
and the\n providerProperties
.
An object which defines the ID mapping technique and any additional\n configurations.
", "smithy.api#required": {} } }, "roleArn": { - "target": "com.amazonaws.entityresolution#RoleArn", + "target": "com.amazonaws.entityresolution#IdMappingRoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.
", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes\n this role to access Amazon Web Services resources on your behalf.
" } } }, @@ -6427,7 +6661,7 @@ } }, "traits": { - "smithy.api#documentation": "The input fails to satisfy the constraints specified by Entity Resolution. HTTP\n Status Code: 400
\n
The input fails to satisfy the constraints specified by Entity Resolution.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } diff --git a/models/eventbridge.json b/models/eventbridge.json index 9756587b89..e61df2d72e 100644 --- a/models/eventbridge.json +++ b/models/eventbridge.json @@ -4801,7 +4801,22 @@ } ], "traits": { - "smithy.api#documentation": "Describes the specified rule.
\nDescribeRule does not list the targets of a rule. To see the targets associated with a\n rule, use ListTargetsByRule.
" + "smithy.api#documentation": "Describes the specified rule.
\nDescribeRule does not list the targets of a rule. To see the targets associated with a\n rule, use ListTargetsByRule.
", + "smithy.test#smokeTests": [ + { + "id": "DescribeRuleFailure", + "params": { + "Name": "fake-rule" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.eventbridge#DescribeRuleRequest": { @@ -6537,7 +6552,20 @@ } ], "traits": { - "smithy.api#documentation": "Lists your Amazon EventBridge rules. You can either list all the rules or you can\n provide a prefix to match to the rule names.
\nThe maximum number of results per page for requests is 100.
\nListRules does not list the targets of a rule. To see the targets associated with a rule,\n use ListTargetsByRule.
" + "smithy.api#documentation": "Lists your Amazon EventBridge rules. You can either list all the rules or you can\n provide a prefix to match to the rule names.
\nThe maximum number of results per page for requests is 100.
\nListRules does not list the targets of a rule. To see the targets associated with a rule,\n use ListTargetsByRule.
", + "smithy.test#smokeTests": [ + { + "id": "ListRulesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.eventbridge#ListRulesRequest": { diff --git a/models/fis.json b/models/fis.json index c5c244f6ee..472715c049 100644 --- a/models/fis.json +++ b/models/fis.json @@ -1183,6 +1183,48 @@ "com.amazonaws.fis#ExperimentEndTime": { "type": "timestamp" }, + "com.amazonaws.fis#ExperimentError": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.fis#ExperimentErrorAccountId", + "traits": { + "smithy.api#documentation": "The Amazon Web Services Account ID where the experiment failure occurred.
" + } + }, + "code": { + "target": "com.amazonaws.fis#ExperimentErrorCode", + "traits": { + "smithy.api#documentation": "The error code for the failed experiment.
" + } + }, + "location": { + "target": "com.amazonaws.fis#ExperimentErrorLocation", + "traits": { + "smithy.api#documentation": "Context for the section of the experiment template that failed.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes the error when an experiment has failed
.
The reason for the state.
" } + }, + "error": { + "target": "com.amazonaws.fis#ExperimentError", + "traits": { + "smithy.api#documentation": "The error information of the experiment when the action has failed
.
Fault Injection Service is a managed service that enables you to perform fault injection \n experiments on your Amazon Web Services workloads. For more information, see the Fault Injection Service User Guide.
", + "smithy.api#documentation": "Amazon Web Services Fault Injection Service is a managed service that enables you to perform fault injection \n experiments on your Amazon Web Services workloads. For more information, see the Fault Injection Service User Guide.
", "smithy.api#title": "AWS Fault Injection Simulator", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/glue.json b/models/glue.json index 1f7dff017e..c9055373e8 100644 --- a/models/glue.json +++ b/models/glue.json @@ -78,6 +78,9 @@ { "target": "com.amazonaws.glue#BatchGetWorkflows" }, + { + "target": "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotation" + }, { "target": "com.amazonaws.glue#BatchStopJobRun" }, @@ -309,6 +312,12 @@ { "target": "com.amazonaws.glue#GetDataflowGraph" }, + { + "target": "com.amazonaws.glue#GetDataQualityModel" + }, + { + "target": "com.amazonaws.glue#GetDataQualityModelResult" + }, { "target": "com.amazonaws.glue#GetDataQualityResult" }, @@ -486,6 +495,12 @@ { "target": "com.amazonaws.glue#ListDataQualityRulesets" }, + { + "target": "com.amazonaws.glue#ListDataQualityStatisticAnnotations" + }, + { + "target": "com.amazonaws.glue#ListDataQualityStatistics" + }, { "target": "com.amazonaws.glue#ListDevEndpoints" }, @@ -525,6 +540,9 @@ { "target": "com.amazonaws.glue#PutDataCatalogEncryptionSettings" }, + { + "target": "com.amazonaws.glue#PutDataQualityProfileAnnotation" + }, { "target": "com.amazonaws.glue#PutResourcePolicy" }, @@ -2221,6 +2239,44 @@ "smithy.api#documentation": "Specifies an Amazon Redshift target.
" } }, + "com.amazonaws.glue#AnnotationError": { + "type": "structure", + "members": { + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Profile ID for the failed annotation.
" + } + }, + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Statistic ID for the failed annotation.
" + } + }, + "FailureReason": { + "target": "com.amazonaws.glue#DescriptionString", + "traits": { + "smithy.api#documentation": "The reason why the annotation failed.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A failed annotation.
" + } + }, + "com.amazonaws.glue#AnnotationErrorList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#AnnotationError" + } + }, + "com.amazonaws.glue#AnnotationList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#StatisticAnnotation" + } + }, "com.amazonaws.glue#ApplyMapping": { "type": "structure", "members": { @@ -2560,6 +2616,12 @@ "smithy.api#required": {} } }, + "PartitionKeys": { + "target": "com.amazonaws.glue#GlueStudioPathList", + "traits": { + "smithy.api#documentation": "The partition keys used to distribute data across multiple partitions or shards based on a specific key or set of key.
" + } + }, "Database": { "target": "com.amazonaws.glue#EnclosedInStringProperty", "traits": { @@ -3724,6 +3786,67 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotation": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotationRequest" + }, + "output": { + "target": "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#ResourceNumberLimitExceededException" + } + ], + "traits": { + "smithy.api#documentation": "Annotate datapoints over time for a specific data quality statistic.
" + } + }, + "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotationRequest": { + "type": "structure", + "members": { + "InclusionAnnotations": { + "target": "com.amazonaws.glue#InclusionAnnotationList", + "traits": { + "smithy.api#documentation": "A list of DatapointInclusionAnnotation
's.
Client Token.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#BatchPutDataQualityStatisticAnnotationResponse": { + "type": "structure", + "members": { + "FailedInclusionAnnotations": { + "target": "com.amazonaws.glue#AnnotationErrorList", + "traits": { + "smithy.api#documentation": "A list of AnnotationError
's.
These key-value pairs define parameters for the connection:
\n\n HOST
- The host URI: either the\n fully qualified domain name (FQDN) or the IPv4 address of\n the database host.
\n PORT
- The port number, between\n 1024 and 65535, of the port on which the database host is\n listening for database connections.
\n USER_NAME
- The name under which\n to log in to the database. The value string for USER_NAME
is \"USERNAME
\".
\n PASSWORD
- A password,\n if one is used, for the user name.
\n ENCRYPTED_PASSWORD
- When you enable connection password protection by setting ConnectionPasswordEncryption
in the Data Catalog encryption settings, this field stores the encrypted password.
\n JDBC_DRIVER_JAR_URI
- The Amazon Simple Storage Service (Amazon S3) path of the\n JAR file that contains the JDBC driver to use.
\n JDBC_DRIVER_CLASS_NAME
- The class name of the JDBC driver to use.
\n JDBC_ENGINE
- The name of the JDBC engine to use.
\n JDBC_ENGINE_VERSION
- The version of the JDBC engine to use.
\n CONFIG_FILES
- (Reserved for future use.)
\n INSTANCE_ID
- The instance ID to use.
\n JDBC_CONNECTION_URL
- The URL for connecting to a JDBC data source.
\n JDBC_ENFORCE_SSL
- A Boolean string (true, false) specifying whether Secure\n Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the\n client. The default is false.
\n CUSTOM_JDBC_CERT
- An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.
\n SKIP_CUSTOM_JDBC_CERT_VALIDATION
- By default, this is false
. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true
to skip Glue’s validation of the customer certificate.
\n CUSTOM_JDBC_CERT_STRING
- A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN
; in Microsoft SQL Server, this is used as the hostNameInCertificate
.
\n CONNECTION_URL
- The URL for connecting to a general (non-JDBC) data source.
\n SECRET_ID
- The secret ID used for the secret manager of credentials.
\n CONNECTOR_URL
- The connector URL for a MARKETPLACE or CUSTOM connection.
\n CONNECTOR_TYPE
- The connector type for a MARKETPLACE or CUSTOM connection.
\n CONNECTOR_CLASS_NAME
- The connector class name for a MARKETPLACE or CUSTOM connection.
\n KAFKA_BOOTSTRAP_SERVERS
- A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.
\n KAFKA_SSL_ENABLED
- Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".
\n KAFKA_CUSTOM_CERT
- The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.
\n KAFKA_SKIP_CUSTOM_CERT_VALIDATION
- Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".
\n KAFKA_CLIENT_KEYSTORE
- The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).
\n KAFKA_CLIENT_KEYSTORE_PASSWORD
- The password to access the provided keystore (Optional).
\n KAFKA_CLIENT_KEY_PASSWORD
- A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).
\n ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD
- The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).
\n ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD
- The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).
\n KAFKA_SASL_MECHANISM
- \"SCRAM-SHA-512\"
, \"GSSAPI\"
, \"AWS_MSK_IAM\"
, or \"PLAIN\"
. These are the supported SASL Mechanisms.
\n KAFKA_SASL_PLAIN_USERNAME
- A plaintext username used to authenticate with the \"PLAIN\" mechanism.
\n KAFKA_SASL_PLAIN_PASSWORD
- A plaintext password used to authenticate with the \"PLAIN\" mechanism.
\n ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD
- The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected).
\n KAFKA_SASL_SCRAM_USERNAME
- A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.
\n KAFKA_SASL_SCRAM_PASSWORD
- A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.
\n ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD
- The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).
\n KAFKA_SASL_SCRAM_SECRETS_ARN
- The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.
\n KAFKA_SASL_GSSAPI_KEYTAB
- The S3 location of a Kerberos keytab
file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.
\n KAFKA_SASL_GSSAPI_KRB5_CONF
- The S3 location of a Kerberos krb5.conf
file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.
\n KAFKA_SASL_GSSAPI_SERVICE
- The Kerberos service name, as set with sasl.kerberos.service.name
in your Kafka Configuration.
\n KAFKA_SASL_GSSAPI_PRINCIPAL
- The name of the Kerberos principal used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.
These key-value pairs define parameters for the connection:
\n\n HOST
- The host URI: either the\n fully qualified domain name (FQDN) or the IPv4 address of\n the database host.
\n PORT
- The port number, between\n 1024 and 65535, of the port on which the database host is\n listening for database connections.
\n USER_NAME
- The name under which\n to log in to the database. The value string for USER_NAME
is \"USERNAME
\".
\n PASSWORD
- A password,\n if one is used, for the user name.
\n ENCRYPTED_PASSWORD
- When you enable connection password protection by setting ConnectionPasswordEncryption
in the Data Catalog encryption settings, this field stores the encrypted password.
\n JDBC_DRIVER_JAR_URI
- The Amazon Simple Storage Service (Amazon S3) path of the\n JAR file that contains the JDBC driver to use.
\n JDBC_DRIVER_CLASS_NAME
- The class name of the JDBC driver to use.
\n JDBC_ENGINE
- The name of the JDBC engine to use.
\n JDBC_ENGINE_VERSION
- The version of the JDBC engine to use.
\n CONFIG_FILES
- (Reserved for future use.)
\n INSTANCE_ID
- The instance ID to use.
\n JDBC_CONNECTION_URL
- The URL for connecting to a JDBC data source.
\n JDBC_ENFORCE_SSL
- A Boolean string (true, false) specifying whether Secure\n Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the\n client. The default is false.
\n CUSTOM_JDBC_CERT
- An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.
\n SKIP_CUSTOM_JDBC_CERT_VALIDATION
- By default, this is false
. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true
to skip Glue’s validation of the customer certificate.
\n CUSTOM_JDBC_CERT_STRING
- A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN
; in Microsoft SQL Server, this is used as the hostNameInCertificate
.
\n CONNECTION_URL
- The URL for connecting to a general (non-JDBC) data source.
\n SECRET_ID
- The secret ID used for the secret manager of credentials.
\n CONNECTOR_URL
- The connector URL for a MARKETPLACE or CUSTOM connection.
\n CONNECTOR_TYPE
- The connector type for a MARKETPLACE or CUSTOM connection.
\n CONNECTOR_CLASS_NAME
- The connector class name for a MARKETPLACE or CUSTOM connection.
\n KAFKA_BOOTSTRAP_SERVERS
- A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.
\n KAFKA_SSL_ENABLED
- Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".
\n KAFKA_CUSTOM_CERT
- The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.
\n KAFKA_SKIP_CUSTOM_CERT_VALIDATION
- Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".
\n KAFKA_CLIENT_KEYSTORE
- The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).
\n KAFKA_CLIENT_KEYSTORE_PASSWORD
- The password to access the provided keystore (Optional).
\n KAFKA_CLIENT_KEY_PASSWORD
- A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).
\n ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD
- The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).
\n ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD
- The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).
\n KAFKA_SASL_MECHANISM
- \"SCRAM-SHA-512\"
, \"GSSAPI\"
, \"AWS_MSK_IAM\"
, or \"PLAIN\"
. These are the supported SASL Mechanisms.
\n KAFKA_SASL_PLAIN_USERNAME
- A plaintext username used to authenticate with the \"PLAIN\" mechanism.
\n KAFKA_SASL_PLAIN_PASSWORD
- A plaintext password used to authenticate with the \"PLAIN\" mechanism.
\n ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD
- The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected).
\n KAFKA_SASL_SCRAM_USERNAME
- A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.
\n KAFKA_SASL_SCRAM_PASSWORD
- A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.
\n ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD
- The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).
\n KAFKA_SASL_SCRAM_SECRETS_ARN
- The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.
\n KAFKA_SASL_GSSAPI_KEYTAB
- The S3 location of a Kerberos keytab
file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.
\n KAFKA_SASL_GSSAPI_KRB5_CONF
- The S3 location of a Kerberos krb5.conf
file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.
\n KAFKA_SASL_GSSAPI_SERVICE
- The Kerberos service name, as set with sasl.kerberos.service.name
in your Kafka Configuration.
\n KAFKA_SASL_GSSAPI_PRINCIPAL
- The name of the Kerberos principal used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.
\n ROLE_ARN
- The role to be used for running queries.
\n REGION
- The Amazon Web Services Region where queries will be run.
\n WORKGROUP_NAME
- The name of an Amazon Redshift serverless workgroup or Amazon Athena workgroup in which queries will run.
\n CLUSTER_IDENTIFIER
- The cluster identifier of an Amazon Redshift cluster in which queries will run.
\n DATABASE
- The Amazon Redshift database that you are connecting to.
The type of the connection. Currently, these types are supported:
\n\n JDBC
- Designates a connection to a database through Java Database Connectivity (JDBC).
\n JDBC
Connections use the following ConnectionParameters.
Required: All of (HOST
, PORT
, JDBC_ENGINE
) or JDBC_CONNECTION_URL
.
Required: All of (USERNAME
, PASSWORD
) or SECRET_ID
.
Optional: JDBC_ENFORCE_SSL
, CUSTOM_JDBC_CERT
, CUSTOM_JDBC_CERT_STRING
, SKIP_CUSTOM_JDBC_CERT_VALIDATION
. These parameters are used to configure SSL with JDBC.
\n KAFKA
- Designates a connection to an Apache Kafka streaming platform.
\n KAFKA
Connections use the following ConnectionParameters.
Required: KAFKA_BOOTSTRAP_SERVERS
.
Optional: KAFKA_SSL_ENABLED
, KAFKA_CUSTOM_CERT
, KAFKA_SKIP_CUSTOM_CERT_VALIDATION
. These parameters are used to configure SSL with KAFKA
.
Optional: KAFKA_CLIENT_KEYSTORE
, KAFKA_CLIENT_KEYSTORE_PASSWORD
, KAFKA_CLIENT_KEY_PASSWORD
, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD
, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD
. These parameters are used to configure TLS client configuration with SSL in KAFKA
.
Optional: KAFKA_SASL_MECHANISM
. Can be specified as SCRAM-SHA-512
, GSSAPI
, or AWS_MSK_IAM
.
Optional: KAFKA_SASL_SCRAM_USERNAME
, KAFKA_SASL_SCRAM_PASSWORD
, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD
. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA
.
Optional: KAFKA_SASL_GSSAPI_KEYTAB
, KAFKA_SASL_GSSAPI_KRB5_CONF
, KAFKA_SASL_GSSAPI_SERVICE
, KAFKA_SASL_GSSAPI_PRINCIPAL
. These parameters are used to configure SASL/GSSAPI authentication with KAFKA
.
\n MONGODB
- Designates a connection to a MongoDB document database.
\n MONGODB
Connections use the following ConnectionParameters.
Required: CONNECTION_URL
.
Required: All of (USERNAME
, PASSWORD
) or SECRET_ID
.
\n SALESFORCE
- Designates a connection to Salesforce using OAuth authentication.
Requires the AuthenticationConfiguration
member to be configured.
\n NETWORK
- Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).
\n NETWORK
Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.
\n MARKETPLACE
- Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue.
\n MARKETPLACE
Connections use the following ConnectionParameters.
Required: CONNECTOR_TYPE
, CONNECTOR_URL
, CONNECTOR_CLASS_NAME
, CONNECTION_URL
.
Required for JDBC
\n CONNECTOR_TYPE
connections: All of (USERNAME
, PASSWORD
) or SECRET_ID
.
\n CUSTOM
- Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.
\n SFTP
is not supported.
For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties.
\nFor more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections.
", + "smithy.api#documentation": "The type of the connection. Currently, these types are supported:
\n\n JDBC
- Designates a connection to a database through Java Database Connectivity (JDBC).
\n JDBC
Connections use the following ConnectionParameters.
Required: All of (HOST
, PORT
, JDBC_ENGINE
) or JDBC_CONNECTION_URL
.
Required: All of (USERNAME
, PASSWORD
) or SECRET_ID
.
Optional: JDBC_ENFORCE_SSL
, CUSTOM_JDBC_CERT
, CUSTOM_JDBC_CERT_STRING
, SKIP_CUSTOM_JDBC_CERT_VALIDATION
. These parameters are used to configure SSL with JDBC.
\n KAFKA
- Designates a connection to an Apache Kafka streaming platform.
\n KAFKA
Connections use the following ConnectionParameters.
Required: KAFKA_BOOTSTRAP_SERVERS
.
Optional: KAFKA_SSL_ENABLED
, KAFKA_CUSTOM_CERT
, KAFKA_SKIP_CUSTOM_CERT_VALIDATION
. These parameters are used to configure SSL with KAFKA
.
Optional: KAFKA_CLIENT_KEYSTORE
, KAFKA_CLIENT_KEYSTORE_PASSWORD
, KAFKA_CLIENT_KEY_PASSWORD
, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD
, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD
. These parameters are used to configure TLS client configuration with SSL in KAFKA
.
Optional: KAFKA_SASL_MECHANISM
. Can be specified as SCRAM-SHA-512
, GSSAPI
, or AWS_MSK_IAM
.
Optional: KAFKA_SASL_SCRAM_USERNAME
, KAFKA_SASL_SCRAM_PASSWORD
, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD
. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA
.
Optional: KAFKA_SASL_GSSAPI_KEYTAB
, KAFKA_SASL_GSSAPI_KRB5_CONF
, KAFKA_SASL_GSSAPI_SERVICE
, KAFKA_SASL_GSSAPI_PRINCIPAL
. These parameters are used to configure SASL/GSSAPI authentication with KAFKA
.
\n MONGODB
- Designates a connection to a MongoDB document database.
\n MONGODB
Connections use the following ConnectionParameters.
Required: CONNECTION_URL
.
Required: All of (USERNAME
, PASSWORD
) or SECRET_ID
.
\n SALESFORCE
- Designates a connection to Salesforce using OAuth authentication.
Requires the AuthenticationConfiguration
member to be configured.
\n VIEW_VALIDATION_REDSHIFT
- Designates a connection used for view validation by Amazon Redshift.
\n VIEW_VALIDATION_ATHENA
- Designates a connection used for view validation by Amazon Athena.
\n NETWORK
- Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).
\n NETWORK
Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.
\n MARKETPLACE
- Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue.
\n MARKETPLACE
Connections use the following ConnectionParameters.
Required: CONNECTOR_TYPE
, CONNECTOR_URL
, CONNECTOR_CLASS_NAME
, CONNECTION_URL
.
Required for JDBC
\n CONNECTOR_TYPE
connections: All of (USERNAME
, PASSWORD
) or SECRET_ID
.
\n CUSTOM
- Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.
\n SFTP
is not supported.
For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties.
\nFor more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections.
", "smithy.api#required": {} } }, @@ -7127,6 +7250,30 @@ "traits": { "smithy.api#enumValue": "ROLE_ARN" } + }, + "REGION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REGION" + } + }, + "WORKGROUP_NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WORKGROUP_NAME" + } + }, + "CLUSTER_IDENTIFIER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLUSTER_IDENTIFIER" + } + }, + "DATABASE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATABASE" + } } } }, @@ -7203,6 +7350,18 @@ "traits": { "smithy.api#enumValue": "SALESFORCE" } + }, + "VIEW_VALIDATION_REDSHIFT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VIEW_VALIDATION_REDSHIFT" + } + }, + "VIEW_VALIDATION_ATHENA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VIEW_VALIDATION_ATHENA" + } } } }, @@ -8503,6 +8662,12 @@ "smithy.api#documentation": "A target table associated with the data quality ruleset.
" } }, + "DataQualitySecurityConfiguration": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "The name of the security configuration created with the data quality encryption option.
" + } + }, "ClientToken": { "target": "com.amazonaws.glue#HashString", "traits": { @@ -11121,6 +11286,29 @@ "smithy.api#documentation": "Describes the data quality metric value according to the analysis of historical data.
" } }, + "com.amazonaws.glue#DataQualityModelStatus": { + "type": "enum", + "members": { + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RUNNING" + } + }, + "SUCCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, "com.amazonaws.glue#DataQualityObservation": { "type": "structure", "members": { @@ -11148,7 +11336,8 @@ "min": 0, "max": 2048 }, - "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$", + "smithy.api#sensitive": {} } }, "com.amazonaws.glue#DataQualityObservations": { @@ -11172,6 +11361,12 @@ "smithy.api#documentation": "A unique result ID for the data quality result.
" } }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Profile ID for the data quality result.
" + } + }, "Score": { "target": "com.amazonaws.glue#GenericBoundedDouble", "traits": { @@ -11458,6 +11653,12 @@ "traits": { "smithy.api#documentation": "A map of metrics associated with the evaluation of the rule.
" } + }, + "EvaluatedRule": { + "target": "com.amazonaws.glue#DataQualityRuleResultDescription", + "traits": { + "smithy.api#documentation": "The evaluated rule.
" + } } }, "traits": { @@ -11471,7 +11672,8 @@ "min": 0, "max": 2048 }, - "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$", + "smithy.api#sensitive": {} } }, "com.amazonaws.glue#DataQualityRuleResultStatus": { @@ -11926,6 +12128,32 @@ } } }, + "com.amazonaws.glue#DatapointInclusionAnnotation": { + "type": "structure", + "members": { + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The ID of the data quality profile the statistic belongs to.
" + } + }, + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Statistic ID.
" + } + }, + "InclusionAnnotation": { + "target": "com.amazonaws.glue#InclusionAnnotationValue", + "traits": { + "smithy.api#documentation": "The inclusion annotation value to apply to the statistic.
" + } + } + }, + "traits": { + "smithy.api#documentation": "An Inclusion Annotation.
" + } + }, "com.amazonaws.glue#Datatype": { "type": "structure", "members": { @@ -14686,6 +14914,9 @@ }, "value": { "target": "com.amazonaws.glue#NullableDouble" + }, + "traits": { + "smithy.api#sensitive": {} } }, "com.amazonaws.glue#EvaluationMetrics": { @@ -15670,7 +15901,20 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves the status of a migration operation.
" + "smithy.api#documentation": "Retrieves the status of a migration operation.
", + "smithy.test#smokeTests": [ + { + "id": "GetCatalogImportStatusSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.glue#GetCatalogImportStatusRequest": { @@ -16605,6 +16849,153 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#GetDataQualityModel": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#GetDataQualityModelRequest" + }, + "output": { + "target": "com.amazonaws.glue#GetDataQualityModelResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieve the training status of the model along with more information (CompletedOn, StartedOn, FailureReason).
" + } + }, + "com.amazonaws.glue#GetDataQualityModelRequest": { + "type": "structure", + "members": { + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Statistic ID.
" + } + }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Profile ID.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#GetDataQualityModelResponse": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.glue#DataQualityModelStatus", + "traits": { + "smithy.api#documentation": "The training status of the data quality model.
" + } + }, + "StartedOn": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp when the data quality model training started.
" + } + }, + "CompletedOn": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp when the data quality model training completed.
" + } + }, + "FailureReason": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The training failure reason.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.glue#GetDataQualityModelResult": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#GetDataQualityModelResultRequest" + }, + "output": { + "target": "com.amazonaws.glue#GetDataQualityModelResultResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieve a statistic's predictions for a given Profile ID.
" + } + }, + "com.amazonaws.glue#GetDataQualityModelResultRequest": { + "type": "structure", + "members": { + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Statistic ID.
", + "smithy.api#required": {} + } + }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Profile ID.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#GetDataQualityModelResultResponse": { + "type": "structure", + "members": { + "CompletedOn": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp when the data quality model training completed.
" + } + }, + "Model": { + "target": "com.amazonaws.glue#StatisticModelResults", + "traits": { + "smithy.api#documentation": "A list of StatisticModelResult
\n
A unique result ID for the data quality result.
" } }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Profile ID for the data quality result.
" + } + }, "Score": { "target": "com.amazonaws.glue#GenericBoundedDouble", "traits": { @@ -16854,6 +17251,12 @@ "traits": { "smithy.api#documentation": "The name of the ruleset that was created by the run.
" } + }, + "DataQualitySecurityConfiguration": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "The name of the security configuration created with the data quality encryption option.
" + } } }, "traits": { @@ -17085,6 +17488,12 @@ "traits": { "smithy.api#documentation": "When a ruleset was created from a recommendation run, this run ID is generated to link the two together.
" } + }, + "DataQualitySecurityConfiguration": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "The name of the security configuration created with the data quality encryption option.
" + } } }, "traits": { @@ -19748,6 +20157,12 @@ "traits": { "smithy.api#documentation": "The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId
.
Specifies whether to include status details related to a request to create or update an Glue Data Catalog view.
" + } } }, "traits": { @@ -20030,6 +20445,18 @@ "traits": { "smithy.api#documentation": "The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId
.
Specifies whether to include status details related to a request to create or update an Glue Data Catalog view.
" + } + }, + "AttributesToGet": { + "target": "com.amazonaws.glue#TableAttributesList", + "traits": { + "smithy.api#documentation": " Specifies the table fields returned by the GetTables
call. This parameter doesn’t accept an empty list. The request must include NAME
.
The following are the valid combinations of values:
\n\n NAME
- Names of all tables in the database.
\n NAME
, TABLE_TYPE
- Names of all tables and the table types.
A continuation token, present if the current list segment is\n not the last.
" + "smithy.api#documentation": "A continuation token, present if the current list segment is not the last.
" } } }, @@ -21912,6 +22339,29 @@ "smithy.api#documentation": "Specifies configuration properties for an importing labels task run.
" } }, + "com.amazonaws.glue#InclusionAnnotationList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#DatapointInclusionAnnotation" + } + }, + "com.amazonaws.glue#InclusionAnnotationValue": { + "type": "enum", + "members": { + "INCLUDE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INCLUDE" + } + }, + "EXCLUDE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXCLUDE" + } + } + } + }, "com.amazonaws.glue#Integer": { "type": "integer", "traits": { @@ -24400,13 +24850,87 @@ "smithy.api#input": {} } }, - "com.amazonaws.glue#ListDataQualityRuleRecommendationRunsResponse": { + "com.amazonaws.glue#ListDataQualityRuleRecommendationRunsResponse": { + "type": "structure", + "members": { + "Runs": { + "target": "com.amazonaws.glue#DataQualityRuleRecommendationRunList", + "traits": { + "smithy.api#documentation": "A list of DataQualityRuleRecommendationRunDescription
objects.
A pagination token, if more results are available.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.glue#ListDataQualityRulesetEvaluationRuns": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsRequest" + }, + "output": { + "target": "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "Lists all the runs meeting the filter criteria, where a ruleset is evaluated against a data source.
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsRequest": { + "type": "structure", + "members": { + "Filter": { + "target": "com.amazonaws.glue#DataQualityRulesetEvaluationRunFilter", + "traits": { + "smithy.api#documentation": "The filter criteria.
" + } + }, + "NextToken": { + "target": "com.amazonaws.glue#PaginationToken", + "traits": { + "smithy.api#documentation": "A paginated token to offset the results.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.glue#PageSize", + "traits": { + "smithy.api#documentation": "The maximum number of results to return.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsResponse": { "type": "structure", "members": { "Runs": { - "target": "com.amazonaws.glue#DataQualityRuleRecommendationRunList", + "target": "com.amazonaws.glue#DataQualityRulesetEvaluationRunList", "traits": { - "smithy.api#documentation": "A list of DataQualityRuleRecommendationRunDescription
objects.
A list of DataQualityRulesetEvaluationRunDescription
objects representing data quality ruleset runs.
Lists all the runs meeting the filter criteria, where a ruleset is evaluated against a data source.
", + "smithy.api#documentation": "Returns a paginated list of rulesets for the specified list of Glue tables.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -24448,25 +24975,109 @@ } } }, - "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsRequest": { + "com.amazonaws.glue#ListDataQualityRulesetsRequest": { "type": "structure", "members": { + "NextToken": { + "target": "com.amazonaws.glue#PaginationToken", + "traits": { + "smithy.api#documentation": "A paginated token to offset the results.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.glue#PageSize", + "traits": { + "smithy.api#documentation": "The maximum number of results to return.
" + } + }, "Filter": { - "target": "com.amazonaws.glue#DataQualityRulesetEvaluationRunFilter", + "target": "com.amazonaws.glue#DataQualityRulesetFilterCriteria", "traits": { - "smithy.api#documentation": "The filter criteria.
" + "smithy.api#documentation": "The filter criteria.
" + } + }, + "Tags": { + "target": "com.amazonaws.glue#TagsMap", + "traits": { + "smithy.api#documentation": "A list of key-value pair tags.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#ListDataQualityRulesetsResponse": { + "type": "structure", + "members": { + "Rulesets": { + "target": "com.amazonaws.glue#DataQualityRulesetList", + "traits": { + "smithy.api#documentation": "A paginated list of rulesets for the specified list of Glue tables.
" } }, "NextToken": { "target": "com.amazonaws.glue#PaginationToken", "traits": { - "smithy.api#documentation": "A paginated token to offset the results.
" + "smithy.api#documentation": "A pagination token, if more results are available.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.glue#ListDataQualityStatisticAnnotations": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#ListDataQualityStatisticAnnotationsRequest" + }, + "output": { + "target": "com.amazonaws.glue#ListDataQualityStatisticAnnotationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieve annotations for a data quality statistic.
" + } + }, + "com.amazonaws.glue#ListDataQualityStatisticAnnotationsRequest": { + "type": "structure", + "members": { + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Statistic ID.
" + } + }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Profile ID.
" + } + }, + "TimestampFilter": { + "target": "com.amazonaws.glue#TimestampFilter", + "traits": { + "smithy.api#documentation": "A timestamp filter.
" } }, "MaxResults": { "target": "com.amazonaws.glue#PageSize", "traits": { - "smithy.api#documentation": "The maximum number of results to return.
" + "smithy.api#documentation": "The maximum number of results to return in this request.
" + } + }, + "NextToken": { + "target": "com.amazonaws.glue#PaginationToken", + "traits": { + "smithy.api#documentation": "A pagination token to retrieve the next set of results.
" } } }, @@ -24474,19 +25085,19 @@ "smithy.api#input": {} } }, - "com.amazonaws.glue#ListDataQualityRulesetEvaluationRunsResponse": { + "com.amazonaws.glue#ListDataQualityStatisticAnnotationsResponse": { "type": "structure", "members": { - "Runs": { - "target": "com.amazonaws.glue#DataQualityRulesetEvaluationRunList", + "Annotations": { + "target": "com.amazonaws.glue#AnnotationList", "traits": { - "smithy.api#documentation": "A list of DataQualityRulesetEvaluationRunDescription
objects representing data quality ruleset runs.
A list of StatisticAnnotation
applied to the Statistic
A pagination token, if more results are available.
" + "smithy.api#documentation": "A pagination token to retrieve the next set of results.
" } } }, @@ -24494,13 +25105,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.glue#ListDataQualityRulesets": { + "com.amazonaws.glue#ListDataQualityStatistics": { "type": "operation", "input": { - "target": "com.amazonaws.glue#ListDataQualityRulesetsRequest" + "target": "com.amazonaws.glue#ListDataQualityStatisticsRequest" }, "output": { - "target": "com.amazonaws.glue#ListDataQualityRulesetsResponse" + "target": "com.amazonaws.glue#ListDataQualityStatisticsResponse" }, "errors": [ { @@ -24511,45 +25122,43 @@ }, { "target": "com.amazonaws.glue#InvalidInputException" - }, - { - "target": "com.amazonaws.glue#OperationTimeoutException" } ], "traits": { - "smithy.api#documentation": "Returns a paginated list of rulesets for the specified list of Glue tables.
", - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "pageSize": "MaxResults" - } + "smithy.api#documentation": "Retrieves a list of data quality statistics.
" } }, - "com.amazonaws.glue#ListDataQualityRulesetsRequest": { + "com.amazonaws.glue#ListDataQualityStatisticsRequest": { "type": "structure", "members": { - "NextToken": { - "target": "com.amazonaws.glue#PaginationToken", + "StatisticId": { + "target": "com.amazonaws.glue#HashString", "traits": { - "smithy.api#documentation": "A paginated token to offset the results.
" + "smithy.api#documentation": "The Statistic ID.
" } }, - "MaxResults": { - "target": "com.amazonaws.glue#PageSize", + "ProfileId": { + "target": "com.amazonaws.glue#HashString", "traits": { - "smithy.api#documentation": "The maximum number of results to return.
" + "smithy.api#documentation": "The Profile ID.
" } }, - "Filter": { - "target": "com.amazonaws.glue#DataQualityRulesetFilterCriteria", + "TimestampFilter": { + "target": "com.amazonaws.glue#TimestampFilter", "traits": { - "smithy.api#documentation": "The filter criteria.
" + "smithy.api#documentation": "A timestamp filter.
" } }, - "Tags": { - "target": "com.amazonaws.glue#TagsMap", + "MaxResults": { + "target": "com.amazonaws.glue#PageSize", "traits": { - "smithy.api#documentation": "A list of key-value pair tags.
" + "smithy.api#documentation": "The maximum number of results to return in this request.
" + } + }, + "NextToken": { + "target": "com.amazonaws.glue#PaginationToken", + "traits": { + "smithy.api#documentation": "A pagination token to request the next page of results.
" } } }, @@ -24557,19 +25166,19 @@ "smithy.api#input": {} } }, - "com.amazonaws.glue#ListDataQualityRulesetsResponse": { + "com.amazonaws.glue#ListDataQualityStatisticsResponse": { "type": "structure", "members": { - "Rulesets": { - "target": "com.amazonaws.glue#DataQualityRulesetList", + "Statistics": { + "target": "com.amazonaws.glue#StatisticSummaryList", "traits": { - "smithy.api#documentation": "A paginated list of rulesets for the specified list of Glue tables.
" + "smithy.api#documentation": "A StatisticSummaryList
.
A pagination token, if more results are available.
" + "smithy.api#documentation": "A pagination token to request the next page of results.
" } } }, @@ -26227,6 +26836,12 @@ "smithy.api#documentation": "The name of the data quality metric used for generating the observation.
" } }, + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Statistic ID.
" + } + }, "MetricValues": { "target": "com.amazonaws.glue#DataQualityMetricValues", "traits": { @@ -28077,6 +28692,59 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#PutDataQualityProfileAnnotation": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#PutDataQualityProfileAnnotationRequest" + }, + "output": { + "target": "com.amazonaws.glue#PutDataQualityProfileAnnotationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "Annotate all datapoints for a Profile.
" + } + }, + "com.amazonaws.glue#PutDataQualityProfileAnnotationRequest": { + "type": "structure", + "members": { + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The ID of the data quality monitoring profile to annotate.
", + "smithy.api#required": {} + } + }, + "InclusionAnnotation": { + "target": "com.amazonaws.glue#InclusionAnnotationValue", + "traits": { + "smithy.api#documentation": "The inclusion annotation value to apply to the profile.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#PutDataQualityProfileAnnotationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "Left blank.
", + "smithy.api#output": {} + } + }, "com.amazonaws.glue#PutResourcePolicy": { "type": "operation", "input": { @@ -28785,6 +29453,12 @@ "smithy.api#documentation": "Specifies a target that uses Amazon Redshift.
" } }, + "com.amazonaws.glue#ReferenceDatasetsList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#NameString" + } + }, "com.amazonaws.glue#RegisterSchemaVersion": { "type": "operation", "input": { @@ -29197,6 +29871,23 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#ResourceAction": { + "type": "enum", + "members": { + "UPDATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATE" + } + }, + "CREATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATE" + } + } + } + }, "com.amazonaws.glue#ResourceNotReadyException": { "type": "structure", "members": { @@ -29250,6 +29941,41 @@ } } }, + "com.amazonaws.glue#ResourceState": { + "type": "enum", + "members": { + "QUEUED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUEUED" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCESS" + } + }, + "STOPPED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, "com.amazonaws.glue#ResourceType": { "type": "enum", "members": { @@ -29416,6 +30142,26 @@ "com.amazonaws.glue#RunId": { "type": "string" }, + "com.amazonaws.glue#RunIdentifier": { + "type": "structure", + "members": { + "RunId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Run ID.
" + } + }, + "JobRunId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Job Run ID.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A run identifier.
" + } + }, "com.amazonaws.glue#RunMetrics": { "type": "structure", "members": { @@ -31119,6 +31865,12 @@ "traits": { "smithy.api#documentation": "Allows you to specify that you want to search the tables shared with your account. The allowable values are FOREIGN
or ALL
.
If set to FOREIGN
, will search the tables shared with your account.
If set to ALL
, will search the tables shared with your account, as well as the tables in your local account.
Specifies whether to include status details related to a request to create or update a Glue Data Catalog view.
" + } } }, "traits": { @@ -32507,6 +33259,12 @@ "smithy.api#documentation": "A name for the ruleset.
" } }, + "DataQualitySecurityConfiguration": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "The name of the security configuration created with the data quality encryption option.
" + } + }, "ClientToken": { "target": "com.amazonaws.glue#HashString", "traits": { @@ -33357,6 +34115,237 @@ } } }, + "com.amazonaws.glue#StatisticAnnotation": { + "type": "structure", + "members": { + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Profile ID.
" + } + }, + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Statistic ID.
" + } + }, + "StatisticRecordedOn": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp when the annotated statistic was recorded.
" + } + }, + "InclusionAnnotation": { + "target": "com.amazonaws.glue#TimestampedInclusionAnnotation", + "traits": { + "smithy.api#documentation": "The inclusion annotation applied to the statistic.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A Statistic Annotation.
" + } + }, + "com.amazonaws.glue#StatisticEvaluationLevel": { + "type": "enum", + "members": { + "DATASET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Dataset" + } + }, + "COLUMN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Column" + } + }, + "MULTICOLUMN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Multicolumn" + } + } + } + }, + "com.amazonaws.glue#StatisticModelResult": { + "type": "structure", + "members": { + "LowerBound": { + "target": "com.amazonaws.glue#NullableDouble", + "traits": { + "smithy.api#documentation": "The lower bound.
" + } + }, + "UpperBound": { + "target": "com.amazonaws.glue#NullableDouble", + "traits": { + "smithy.api#documentation": "The upper bound.
" + } + }, + "PredictedValue": { + "target": "com.amazonaws.glue#NullableDouble", + "traits": { + "smithy.api#documentation": "The predicted value.
" + } + }, + "ActualValue": { + "target": "com.amazonaws.glue#NullableDouble", + "traits": { + "smithy.api#documentation": "The actual value.
" + } + }, + "Date": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "The date.
" + } + }, + "InclusionAnnotation": { + "target": "com.amazonaws.glue#InclusionAnnotationValue", + "traits": { + "smithy.api#documentation": "The inclusion annotation.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The statistic model result.
" + } + }, + "com.amazonaws.glue#StatisticModelResults": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#StatisticModelResult" + } + }, + "com.amazonaws.glue#StatisticNameString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[A-Z][A-Za-z\\.]+$" + } + }, + "com.amazonaws.glue#StatisticPropertiesMap": { + "type": "map", + "key": { + "target": "com.amazonaws.glue#NameString" + }, + "value": { + "target": "com.amazonaws.glue#DescriptionString" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.glue#StatisticSummary": { + "type": "structure", + "members": { + "StatisticId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Statistic ID.
" + } + }, + "ProfileId": { + "target": "com.amazonaws.glue#HashString", + "traits": { + "smithy.api#documentation": "The Profile ID.
" + } + }, + "RunIdentifier": { + "target": "com.amazonaws.glue#RunIdentifier", + "traits": { + "smithy.api#documentation": "The Run Identifier
" + } + }, + "StatisticName": { + "target": "com.amazonaws.glue#StatisticNameString", + "traits": { + "smithy.api#documentation": "The name of the statistic.
" + } + }, + "DoubleValue": { + "target": "com.amazonaws.glue#Double", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "The value of the statistic.
" + } + }, + "EvaluationLevel": { + "target": "com.amazonaws.glue#StatisticEvaluationLevel", + "traits": { + "smithy.api#documentation": "The evaluation level of the statistic. Possible values: Dataset
, Column
, Multicolumn
.
The list of columns referenced by the statistic.
" + } + }, + "ReferencedDatasets": { + "target": "com.amazonaws.glue#ReferenceDatasetsList", + "traits": { + "smithy.api#documentation": "The list of datasets referenced by the statistic.
" + } + }, + "StatisticProperties": { + "target": "com.amazonaws.glue#StatisticPropertiesMap", + "traits": { + "smithy.api#documentation": "A StatisticPropertiesMap
, which contains a NameString
and DescriptionString
\n
The timestamp when the statistic was recorded.
" + } + }, + "InclusionAnnotation": { + "target": "com.amazonaws.glue#TimestampedInclusionAnnotation", + "traits": { + "smithy.api#documentation": "The inclusion annotation for the statistic.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Summary information about a statistic.
" + } + }, + "com.amazonaws.glue#StatisticSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#StatisticSummary" + }, + "traits": { + "smithy.api#documentation": "A list of StatisticSummary
.
A Table
object representing the requested changes.
A list of ViewValidation
objects that contain information for an analytical engine to validate a view.
A structure containing information about an asynchronous change to a table.
" + } + }, "com.amazonaws.glue#StopColumnStatisticsTaskRun": { "type": "operation", "input": { @@ -34015,12 +35004,38 @@ "traits": { "smithy.api#documentation": "Specifies whether the view supports the SQL dialects of one or more different query engines and can therefore be read by those engines.
" } + }, + "Status": { + "target": "com.amazonaws.glue#TableStatus" } }, "traits": { "smithy.api#documentation": "Represents a collection of related data organized in columns and rows.
" } }, + "com.amazonaws.glue#TableAttributes": { + "type": "enum", + "members": { + "NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NAME" + } + }, + "TABLE_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TABLE_TYPE" + } + } + } + }, + "com.amazonaws.glue#TableAttributesList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#TableAttributes" + } + }, "com.amazonaws.glue#TableError": { "type": "structure", "members": { @@ -34324,6 +35339,62 @@ } } }, + "com.amazonaws.glue#TableStatus": { + "type": "structure", + "members": { + "RequestedBy": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "The ARN of the user who requested the asynchronous change.
" + } + }, + "UpdatedBy": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "The ARN of the user to last manually alter the asynchronous change (requesting cancellation, etc).
" + } + }, + "RequestTime": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "An ISO 8601 formatted date string indicating the time that the change was initiated.
" + } + }, + "UpdateTime": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "An ISO 8601 formatted date string indicating the time that the state was last updated.
" + } + }, + "Action": { + "target": "com.amazonaws.glue#ResourceAction", + "traits": { + "smithy.api#documentation": "Indicates which action was called on the table, currently only CREATE
or UPDATE
.
A generic status for the change in progress, such as QUEUED, IN_PROGRESS, SUCCESS, or FAILED.
" + } + }, + "Error": { + "target": "com.amazonaws.glue#ErrorDetail", + "traits": { + "smithy.api#documentation": "An error that will only appear when the state is \"FAILED\". This is a parent level exception message, there may be different Error
s for each dialect.
A StatusDetails
object with information about the requested change.
A structure containing information about the state of an asynchronous change to a table.
" + } + }, "com.amazonaws.glue#TableTypeString": { "type": "string", "traits": { @@ -34824,9 +35895,49 @@ "com.amazonaws.glue#Timestamp": { "type": "timestamp" }, + "com.amazonaws.glue#TimestampFilter": { + "type": "structure", + "members": { + "RecordedBefore": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp before which statistics should be included in the results.
" + } + }, + "RecordedAfter": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp after which statistics should be included in the results.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A timestamp filter.
" + } + }, "com.amazonaws.glue#TimestampValue": { "type": "timestamp" }, + "com.amazonaws.glue#TimestampedInclusionAnnotation": { + "type": "structure", + "members": { + "Value": { + "target": "com.amazonaws.glue#InclusionAnnotationValue", + "traits": { + "smithy.api#documentation": "The inclusion annotation value.
" + } + }, + "LastModifiedOn": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp when the inclusion annotation was last modified.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A timestamped inclusion annotation.
" + } + }, "com.amazonaws.glue#Token": { "type": "string" }, @@ -38171,6 +39282,56 @@ } } }, + "com.amazonaws.glue#ViewValidation": { + "type": "structure", + "members": { + "Dialect": { + "target": "com.amazonaws.glue#ViewDialect", + "traits": { + "smithy.api#documentation": "The dialect of the query engine.
" + } + }, + "DialectVersion": { + "target": "com.amazonaws.glue#ViewDialectVersionString", + "traits": { + "smithy.api#documentation": "The version of the dialect of the query engine. For example, 3.0.0.
" + } + }, + "ViewValidationText": { + "target": "com.amazonaws.glue#ViewTextString", + "traits": { + "smithy.api#documentation": "The SELECT
query that defines the view, as provided by the customer.
The time of the last update.
" + } + }, + "State": { + "target": "com.amazonaws.glue#ResourceState", + "traits": { + "smithy.api#documentation": "The state of the validation.
" + } + }, + "Error": { + "target": "com.amazonaws.glue#ErrorDetail", + "traits": { + "smithy.api#documentation": "An error associated with the validation.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A structure that contains information for an analytical engine to validate a view, prior to persisting the view metadata. Used in the case of direct UpdateTable
or CreateTable
API calls.
\n Ephemeris data in Orbit Ephemeris Message (OEM) format.\n
\n\n Position, velocity, and acceleration units must be represented in km
, km/s
, and\n km/s**2
, respectively, in ephemeris data lines. Covariance matrix line units must be\n represented in km**2
if computed from two positions, km**2/s
if computed from one\n position and one velocity, and km**2/s**2
if computed from two velocities. Consult section\n 7.7.2 of The Consultative Committee for Space Data Systems (CCSDS)\n Recommended Standard for Orbit Data Messages\n for more information.\n
\n Ephemeris data in Orbit Ephemeris Message (OEM) format.\n
\n\n AWS Ground Station processes OEM Customer Provided Ephemerides according to the CCSDS standard with some extra restrictions. OEM files should be in KVN format. For more detail about the OEM format that AWS Ground Station supports, see OEM ephemeris format in the AWS Ground Station user guide.\n
" } }, "com.amazonaws.groundstation#PaginationMaxResults": { diff --git a/models/iam.json b/models/iam.json index b03777a860..35d29c7874 100644 --- a/models/iam.json +++ b/models/iam.json @@ -1995,8 +1995,7 @@ "LastUsedDate": { "target": "com.amazonaws.iam#dateType", "traits": { - "smithy.api#documentation": "The date and time, in ISO 8601 date-time\n format, when the access key was most recently used. This field is null in the\n following situations:
\nThe user does not have an access key.
\nAn access key exists but has not been used since IAM began tracking this\n information.
\nThere is no sign-in data associated with the user.
\nThe date and time, in ISO 8601 date-time\n format, when the access key was most recently used. This field is null in the\n following situations:
\nThe user does not have an access key.
\nAn access key exists but has not been used since IAM began tracking this\n information.
\nThere is no sign-in data associated with the user.
\nCreates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).
\nThe OIDC provider that you create with this operation can be used as a principal in a\n role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and\n the OIDC provider.
\nIf you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't\n need to create a separate IAM identity provider. These OIDC identity providers are\n already built-in to Amazon Web Services and are available for your use. Instead, you can move directly\n to creating new roles using your identity provider. To learn more, see Creating\n a role for web identity or OpenID connect federation in the IAM\n User Guide.
\nWhen you create the IAM OIDC provider, you specify the following:
\nThe URL of the OIDC identity provider (IdP) to trust
\nA list of client IDs (also known as audiences) that identify the application\n or applications allowed to authenticate using the OIDC provider
\nA list of tags that are attached to the specified IAM OIDC provider
\nA list of thumbprints of one or more server certificates that the IdP\n uses
\nYou get all of this information from the OIDC IdP you want to use to access\n Amazon Web Services.
\nAmazon Web Services secures communication with some OIDC identity providers (IdPs) through our library\n of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to\n verify your IdP server certificate. In these cases, your legacy thumbprint remains in your\n configuration, but is no longer used for validation. These OIDC IdPs include Auth0, GitHub,\n GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS)\n endpoint.
\nThe trust for the OIDC provider is derived from the IAM provider that this\n operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged\n users.
\nCreates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).
\nThe OIDC provider that you create with this operation can be used as a principal in a\n role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and\n the OIDC provider.
\nIf you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't\n need to create a separate IAM identity provider. These OIDC identity providers are\n already built-in to Amazon Web Services and are available for your use. Instead, you can move directly\n to creating new roles using your identity provider. To learn more, see Creating\n a role for web identity or OpenID connect federation in the IAM\n User Guide.
\nWhen you create the IAM OIDC provider, you specify the following:
\nThe URL of the OIDC identity provider (IdP) to trust
\nA list of client IDs (also known as audiences) that identify the application\n or applications allowed to authenticate using the OIDC provider
\nA list of tags that are attached to the specified IAM OIDC provider
\nA list of thumbprints of one or more server certificates that the IdP\n uses
\nYou get all of this information from the OIDC IdP you want to use to access\n Amazon Web Services.
\nAmazon Web Services secures communication with OIDC identity providers (IdPs) using our library of\n trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS)\n endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed\n by one of these trusted CAs, only then we secure communication using the thumbprints set\n in the IdP's configuration.
\nThe trust for the OIDC provider is derived from the IAM provider that this\n operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged\n users.
\nLists the account alias associated with the Amazon Web Services account (Note: you can have only\n one). For information about using an Amazon Web Services account alias, see Creating,\n deleting, and listing an Amazon Web Services account alias in the Amazon Web Services Sign-In\n User Guide.
", + "smithy.api#documentation": "Lists the account alias associated with the Amazon Web Services account (Note: you can have only\n one). For information about using an Amazon Web Services account alias, see Creating,\n deleting, and listing an Amazon Web Services account alias in the\n IAM User Guide.
", "smithy.api#examples": [ { "title": "To list account aliases", @@ -10918,7 +10932,20 @@ "outputToken": "Marker", "items": "Users", "pageSize": "MaxItems" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListUsersSuccess", + "params": {}, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.iam#ListUsersRequest": { @@ -11300,7 +11327,7 @@ "code": "OpenIdIdpCommunicationError", "httpResponseCode": 400 }, - "smithy.api#documentation": "The request failed because IAM cannot connect to the OpenID Connect identity provider URL.
", + "smithy.api#documentation": "The request failed because IAM cannot connect to the OpenID Connect identity provider\n URL.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -14918,7 +14945,7 @@ } ], "traits": { - "smithy.api#documentation": "Replaces the existing list of server certificate thumbprints associated with an OpenID\n Connect (OIDC) provider resource object with a new list of thumbprints.
\nThe list that you pass with this operation completely replaces the existing list of\n thumbprints. (The lists are not merged.)
\nTypically, you need to update a thumbprint only when the identity provider certificate\n changes, which occurs rarely. However, if the provider's certificate\n does change, any attempt to assume an IAM role that specifies\n the OIDC provider as a principal fails until the certificate thumbprint is\n updated.
\nAmazon Web Services secures communication with some OIDC identity providers (IdPs) through our library\n of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to\n verify your IdP server certificate. In these cases, your legacy thumbprint remains in your\n configuration, but is no longer used for validation. These OIDC IdPs include Auth0, GitHub,\n GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS)\n endpoint.
\nTrust for the OIDC provider is derived from the provider certificate and is\n validated by the thumbprint. Therefore, it is best to limit access to the\n UpdateOpenIDConnectProviderThumbprint
operation to highly\n privileged users.
Replaces the existing list of server certificate thumbprints associated with an OpenID\n Connect (OIDC) provider resource object with a new list of thumbprints.
\nThe list that you pass with this operation completely replaces the existing list of\n thumbprints. (The lists are not merged.)
\nTypically, you need to update a thumbprint only when the identity provider certificate\n changes, which occurs rarely. However, if the provider's certificate\n does change, any attempt to assume an IAM role that specifies\n the OIDC provider as a principal fails until the certificate thumbprint is\n updated.
\nAmazon Web Services secures communication with OIDC identity providers (IdPs) using our library of\n trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS)\n endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed\n by one of these trusted CAs, only then we secure communication using the thumbprints set\n in the IdP's configuration.
\nTrust for the OIDC provider is derived from the provider certificate and is\n validated by the thumbprint. Therefore, it is best to limit access to the\n UpdateOpenIDConnectProviderThumbprint
operation to highly\n privileged users.
\n The ID of the asset composite model.\n
" + "smithy.api#documentation": "The ID of the asset composite model.
" } }, "externalId": { @@ -1548,7 +1548,7 @@ "externalId": { "target": "com.amazonaws.iotsitewise#ExternalId", "traits": { - "smithy.api#documentation": "An external ID to assign to the asset model.
\nIf the composite model is a derived composite model, or one nested inside a component model, you can only set the external ID using \n UpdateAssetModelCompositeModel
and specifying the derived ID of the model or property from the created model it's a part of.
An external ID to assign to the asset model.
\nIf the composite model is a derived composite model, or one nested inside a component\n model, you can only set the external ID using UpdateAssetModelCompositeModel
and\n specifying the derived ID of the model or property from the created model it's a part\n of.
The external ID of the hierarchy, if it has one. When you update an asset hierarchy,\n you may assign an external ID if it doesn't already have one. You can't change the external ID\n of an asset hierarchy that already has one. For more information, see Using external IDs in the IoT SiteWise User Guide.
" + "smithy.api#documentation": "The external ID of the hierarchy, if it has one. When you update an asset hierarchy, you\n may assign an external ID if it doesn't already have one. You can't change the external ID of\n an asset hierarchy that already has one. For more information, see Using external IDs in the IoT SiteWise User Guide.
" } } }, @@ -1724,7 +1724,7 @@ "id": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "\n The ID of the asset model composite model.\n
" + "smithy.api#documentation": "The ID of the asset model composite model.
" } }, "externalId": { @@ -1828,7 +1828,7 @@ "id": { "target": "com.amazonaws.iotsitewise#ID", "traits": { - "smithy.api#documentation": "The ID of the the composite model that this summary describes..
", + "smithy.api#documentation": "The ID of the composite model that this summary describes.
", "smithy.api#required": {} } }, @@ -1841,21 +1841,21 @@ "name": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "The name of the the composite model that this summary describes..
", + "smithy.api#documentation": "The name of the composite model that this summary describes.
", "smithy.api#required": {} } }, "type": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "The type of asset model.
\n\n ASSET_MODEL – (default) An asset model that you can use to create assets.\n Can't be included as a component in another asset model.
\n\n COMPONENT_MODEL – A reusable component that you can include in the composite\n models of other asset models. You can't create assets directly from this type of asset model.
\nThe composite model type. Valid values are AWS/ALARM
, CUSTOM
, or AWS/L4E_ANOMALY
.
The description of the the composite model that this summary describes..
" + "smithy.api#documentation": "The description of the composite model that this summary describes.
" } }, "path": { @@ -1887,7 +1887,7 @@ "id": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "The ID of the asset model hierarchy. This ID is a hierarchyId
.
If you are callling UpdateAssetModel\n to create a new hierarchy: You can specify its ID here, if desired.\n IoT SiteWise automatically generates a unique ID for you, so this parameter is never required.\n However, if you prefer to supply your own ID instead, you can specify it here in UUID format.\n If you specify your own ID, it must be globally unique.
\nIf you are calling UpdateAssetModel to modify an existing\n hierarchy: This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.
The ID of the asset model hierarchy. This ID is a hierarchyId
.
If you are calling UpdateAssetModel to create a\n new hierarchy: You can specify its ID here, if desired.\n IoT SiteWise automatically generates a unique ID for you, so this parameter is never required.\n However, if you prefer to supply your own ID instead, you can specify it here in UUID format.\n If you specify your own ID, it must be globally unique.
\nIf you are calling UpdateAssetModel to modify an existing\n hierarchy: This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.
The ID of the asset model, in UUID format. All assets in this hierarchy must be instances of the\n childAssetModelId
asset model. IoT SiteWise will always return the actual\n asset model ID for this value. However, when you are specifying this value as part of a call to\n UpdateAssetModel, you may provide either the asset model ID or else externalId:
\n followed by the asset model's external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.\n
The ID of the asset model, in UUID format. All assets in this hierarchy must be instances of the\n childAssetModelId
asset model. IoT SiteWise will always return the actual\n asset model ID for this value. However, when you are specifying this value as part of a call to\n UpdateAssetModel, you may provide either the asset model ID or else externalId:
\n followed by the asset model's external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.\n
An external ID to assign to the asset model hierarchy. The external ID must be unique among\n asset model hierarchies within this asset model. For more information, see Using external IDs in the IoT SiteWise User Guide.
" + "smithy.api#documentation": "An external ID to assign to the asset model hierarchy. The external ID must be unique\n among asset model hierarchies within this asset model. For more information, see Using external IDs in the IoT SiteWise User Guide.
" } } }, @@ -1967,7 +1967,7 @@ "id": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "The ID of the asset model property.
\nIf you are callling UpdateAssetModel\n to create a new property: You can specify its ID here, if desired.\n IoT SiteWise automatically generates a unique ID for you, so this parameter is never required.\n However, if you prefer to supply your own ID instead, you can specify it here in UUID format.\n If you specify your own ID, it must be globally unique.
\nIf you are calling UpdateAssetModel to modify an existing\n property: This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.
The ID of the asset model property.
\nIf you are calling UpdateAssetModel to create a\n new property: You can specify its ID here, if desired.\n IoT SiteWise automatically generates a unique ID for you, so this parameter is never required.\n However, if you prefer to supply your own ID instead, you can specify it here in UUID format.\n If you specify your own ID, it must be globally unique.
\nIf you are calling UpdateAssetModel to modify an existing\n property: This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.
\n The ID of the composite model that contains the asset model property.\n
" + "smithy.api#documentation": "The ID of the composite model that contains the asset model property.
" } }, "path": { @@ -2461,7 +2461,7 @@ "unit": { "target": "com.amazonaws.iotsitewise#PropertyUnit", "traits": { - "smithy.api#documentation": "\n The unit of measure (such as Newtons or RPM) of the asset property.\n
" + "smithy.api#documentation": "The unit of measure (such as Newtons or RPM) of the asset property.
" } }, "notification": { @@ -2470,7 +2470,7 @@ "assetCompositeModelId": { "target": "com.amazonaws.iotsitewise#ID", "traits": { - "smithy.api#documentation": "\n The ID of the composite model that contains the asset property.\n
" + "smithy.api#documentation": "The ID of the composite model that contains the asset property.
" } }, "path": { @@ -2763,7 +2763,7 @@ "hierarchyId": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "The ID of a hierarchy in the parent asset's model. (This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) Hierarchies allow different groupings\n of assets to be formed that all come from the same asset model. For more information, see\n Asset hierarchies in the IoT SiteWise User Guide.
The ID of a hierarchy in the parent asset's model. (This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) Hierarchies allow\n different groupings of assets to be formed that all come from the same asset model. For more\n information, see Asset hierarchies in the IoT SiteWise User Guide.
The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.
\nThe size of the result set is equal to 1 MB.
\nThe number of data points in the result set is equal to the value of\n maxResults
. The maximum value of maxResults
is 4000.
The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.
\nThe size of the result set is equal to 1 MB.
\nThe number of data points in the result set is equal to the value of\n maxResults
. The maximum value of maxResults
is 4000.
\n The ID of the composite model that contains the property.\n
" + "smithy.api#documentation": "The ID of the composite model that contains the property.
" } }, "externalId": { @@ -4490,7 +4496,7 @@ } }, "traits": { - "smithy.api#documentation": "Metadata for the composition relationship established by using composedAssetModelId
in \n CreateAssetModelCompositeModel
\n .
Metadata for the composition relationship established by using\n composedAssetModelId
in \n CreateAssetModelCompositeModel
\n .
Represents a composite model that composed an asset model of type COMPONENT_MODEL
.
Represents a composite model that composed an asset model of type\n COMPONENT_MODEL
.
The composite model type. Valid values are AWS/ALARM
, CUSTOM
, or AWS/L4E_ANOMALY
.
The composite model type. Valid values are AWS/ALARM
, CUSTOM
, or\n AWS/L4E_ANOMALY
.
Creates an asset model from specified property and hierarchy definitions. You create\n assets from asset models. With asset models, you can easily create assets of the same type\n that have standardized definitions. Each asset created from a model inherits the asset model's\n property and hierarchy definitions. For more information, see Defining asset models in the\n IoT SiteWise User Guide.
\nYou can create two types of asset models, ASSET_MODEL
or COMPONENT_MODEL
.
\n ASSET_MODEL – (default) An asset model that you can use to create assets.\n Can't be included as a component in another asset model.
\n\n COMPONENT_MODEL – A reusable component that you can include in the composite\n models of other asset models. You can't create assets directly from this type of asset model.
\nCreates an asset model from specified property and hierarchy definitions. You create\n assets from asset models. With asset models, you can easily create assets of the same type\n that have standardized definitions. Each asset created from a model inherits the asset model's\n property and hierarchy definitions. For more information, see Defining asset models in the\n IoT SiteWise User Guide.
\nYou can create two types of asset models, ASSET_MODEL
or\n COMPONENT_MODEL
.
\n ASSET_MODEL – (default) An asset model that\n you can use to create assets. Can't be included as a component in another asset\n model.
\n\n COMPONENT_MODEL – A reusable component that\n you can include in the composite models of other asset models. You can't create\n assets directly from this type of asset model.
\nCreates a custom composite model from specified property and hierarchy definitions. There are two types of custom composite models,\n inline
and component-model-based
.
Use component-model-based custom composite models to define standard, reusable components. A component-model-based custom composite model consists of a name,\n a description, and the ID of the component model it references. A component-model-based custom composite model has no properties of its own; its referenced\n component model provides its associated properties to any created assets. For more information, see\n Custom composite models (Components) in the\n IoT SiteWise User Guide.
\nUse inline custom composite models to organize the properties of an asset model. The properties of inline custom composite models are local to the asset model where they are\n included and can't be used to create multiple assets.
\nTo create a component-model-based model, specify the composedAssetModelId
of an existing asset model with assetModelType
of COMPONENT_MODEL
.
To create an inline model, specify the assetModelCompositeModelProperties
and don't include an composedAssetModelId
.
Creates a custom composite model from specified property and hierarchy definitions. There\n are two types of custom composite models, inline
and\n component-model-based
.
Use component-model-based custom composite models to define standard, reusable components.\n A component-model-based custom composite model consists of a name, a description, and the ID\n of the component model it references. A component-model-based custom composite model has no\n properties of its own; its referenced component model provides its associated properties to\n any created assets. For more information, see Custom composite models (Components)\n in the IoT SiteWise User Guide.
\nUse inline custom composite models to organize the properties of an asset model. The\n properties of inline custom composite models are local to the asset model where they are\n included and can't be used to create multiple assets.
\nTo create a component-model-based model, specify the composedAssetModelId
of\n an existing asset model with assetModelType
of\n COMPONENT_MODEL
.
To create an inline model, specify the assetModelCompositeModelProperties
and\n don't include an composedAssetModelId
.
An external ID to assign to the composite model.
\nIf the composite model is a derived composite model, or one nested inside a component model, you can only set the external ID using \n UpdateAssetModelCompositeModel
and specifying the derived ID of the model or property from the created model it's a part of.
An external ID to assign to the composite model.
\nIf the composite model is a derived composite model, or one nested inside a component\n model, you can only set the external ID using UpdateAssetModelCompositeModel
and\n specifying the derived ID of the model or property from the created model it's a part\n of.
The ID of the composite model. IoT SiteWise automatically generates a unique ID for you, so this parameter is never required. However, \n if you prefer to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique.
" + "smithy.api#documentation": "The ID of the composite model. IoT SiteWise automatically generates a unique ID for you, so this\n parameter is never required. However, if you prefer to supply your own ID instead, you can\n specify it here in UUID format. If you specify your own ID, it must be globally unique.
" } }, "assetModelCompositeModelDescription": { @@ -4938,7 +4945,7 @@ "assetModelCompositeModelName": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "A unique, friendly name for the composite model.
", + "smithy.api#documentation": "A unique name for the composite model.
", "smithy.api#required": {} } }, @@ -4959,13 +4966,13 @@ "composedAssetModelId": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "The ID of a composite model on this asset.
" + "smithy.api#documentation": "The ID of a component model which is reused to create this composite model.
" } }, "assetModelCompositeModelProperties": { "target": "com.amazonaws.iotsitewise#AssetModelPropertyDefinitions", "traits": { - "smithy.api#documentation": "The property definitions of the composite model. For more information, see .
\nYou can specify up to 200 properties per composite model. For more\n information, see Quotas in the IoT SiteWise User Guide.
" + "smithy.api#documentation": "The property definitions of the composite model. For more information, see \n Inline custom composite models in the IoT SiteWise User Guide.
\nYou can specify up to 200 properties per composite model. For more\n information, see Quotas in the IoT SiteWise User Guide.
" } } }, @@ -4979,7 +4986,7 @@ "assetModelCompositeModelId": { "target": "com.amazonaws.iotsitewise#ID", "traits": { - "smithy.api#documentation": "The ID of the composed asset model. You can use this ID when you call other IoT SiteWise APIs.
", + "smithy.api#documentation": "The ID of the composed asset model. You can use this ID when you call other IoT SiteWise\n APIs.
", "smithy.api#required": {} } }, @@ -5007,7 +5014,7 @@ "assetModelName": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "A unique, friendly name for the asset model.
", + "smithy.api#documentation": "A unique name for the asset model.
", "smithy.api#required": {} } }, @@ -5032,7 +5039,7 @@ "assetModelCompositeModels": { "target": "com.amazonaws.iotsitewise#AssetModelCompositeModelDefinitions", "traits": { - "smithy.api#documentation": "The composite models that are part of this asset model. It groups properties\n (such as attributes, measurements, transforms, and metrics) and child composite models that\n model parts of your industrial equipment. Each composite model has a type that defines the\n properties that the composite model supports. Use composite models to define alarms on this asset model.
\nWhen creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information,\n see .
\nThe composite models that are part of this asset model. It groups properties\n (such as attributes, measurements, transforms, and metrics) and child composite models that\n model parts of your industrial equipment. Each composite model has a type that defines the\n properties that the composite model supports. Use composite models to define alarms on this asset model.
\nWhen creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information,\n see Creating custom composite models (Components) in the\n IoT SiteWise User Guide.
\nDefines a job to ingest data to IoT SiteWise from Amazon S3. For more information, see Create a\n bulk import job (CLI) in the Amazon Simple Storage Service User Guide.
\nBefore you create a bulk import job, you must enable IoT SiteWise warm tier or IoT SiteWise cold tier.\n For more information about how to configure storage settings, see PutStorageConfiguration.
\nBulk import is designed to store historical data to IoT SiteWise. It does not trigger computations or notifications on \n IoT SiteWise warm or cold tier storage.
\nDefines a job to ingest data to IoT SiteWise from Amazon S3. For more information, see Create a\n bulk import job (CLI) in the Amazon Simple Storage Service User Guide.
\nBefore you create a bulk import job, you must enable IoT SiteWise warm tier or IoT SiteWise cold tier.\n For more information about how to configure storage settings, see PutStorageConfiguration.
\nBulk import is designed to store historical data to IoT SiteWise. It does not trigger\n computations or notifications on IoT SiteWise warm or cold tier storage.
\nA unique, friendly name for the gateway.
", + "smithy.api#documentation": "A unique name for the gateway.
", "smithy.api#required": {} } }, @@ -5864,7 +5871,7 @@ "scalarValue": { "target": "com.amazonaws.iotsitewise#ScalarValue", "traits": { - "smithy.api#documentation": "Indicates if the data point is a scalar value such as integer, string, double, or Boolean.
" + "smithy.api#documentation": "Indicates if the data point is a scalar value such as integer, string, double, or Boolean.\n
" } }, "arrayValue": { @@ -6067,7 +6074,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a composite model. This action can't be undone. You must delete all assets created\n from a composite model before you can delete the model. Also, you can't delete a composite model if\n a parent asset model exists that contains a property formula expression that depends on the\n asset model that you want to delete. For more information, see Deleting assets and models in the\n IoT SiteWise User Guide.
", + "smithy.api#documentation": "Deletes a composite model. This action can't be undone. You must delete all assets created\n from a composite model before you can delete the model. Also, you can't delete a composite\n model if a parent asset model exists that contains a property formula expression that depends\n on the asset model that you want to delete. For more information, see Deleting assets and\n models in the IoT SiteWise User Guide.
", "smithy.api#endpoint": { "hostPrefix": "api." }, @@ -6279,6 +6286,9 @@ "target": "smithy.api#Unit" }, "errors": [ + { + "target": "com.amazonaws.iotsitewise#ConflictingOperationException" + }, { "target": "com.amazonaws.iotsitewise#InternalFailureException" }, @@ -6829,7 +6839,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves information about an asset composite model (also known as an asset component). An AssetCompositeModel
is an instance of an AssetModelCompositeModel
. If you want to see information about the model this is based on, call \n DescribeAssetModelCompositeModel.
Retrieves information about an asset composite model (also known as an asset component).\n An AssetCompositeModel
is an instance of an\n AssetModelCompositeModel
. If you want to see information about the model this is\n based on, call DescribeAssetModelCompositeModel.
An external ID to assign to the asset model.
\nIf the composite model is a component-based composite model, or one nested inside a component model, you can only set the external ID using \n UpdateAssetModelCompositeModel
and specifying the derived ID of the model or property from the created model it's a part of.
An external ID to assign to the asset model.
\nIf the composite model is a component-based composite model, or one nested inside a\n component model, you can only set the external ID using\n UpdateAssetModelCompositeModel
and specifying the derived ID of the model or\n property from the created model it's a part of.
The composite model type. Valid values are AWS/ALARM
, CUSTOM
, or AWS/L4E_ANOMALY
.
The composite model type. Valid values are AWS/ALARM
, CUSTOM
, or\n AWS/L4E_ANOMALY
.
Retrieves information about an asset model composite model (also known as an asset model component). For more information, see Custom composite models (Components) in the IoT SiteWise User Guide.
", + "smithy.api#documentation": "Retrieves information about an asset model composite model (also known as an asset model\n component). For more information, see Custom composite models\n (Components) in the IoT SiteWise User Guide.
", "smithy.api#endpoint": { "hostPrefix": "api." }, @@ -7117,7 +7127,7 @@ "assetModelCompositeModelType": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "The composite model type. Valid values are AWS/ALARM
, CUSTOM
, or AWS/L4E_ANOMALY
.
The composite model type. Valid values are AWS/ALARM
, CUSTOM
, or\n AWS/L4E_ANOMALY
.
Metadata for the composition relationship established by using composedAssetModelId
in \n CreateAssetModelCompositeModel
\n .\n For instance, an array detailing the path of the composition relationship for this composite model.
Metadata for the composition relationship established by using\n composedAssetModelId
in \n CreateAssetModelCompositeModel
\n . For instance, an array detailing the\n path of the composition relationship for this composite model.
\n Whether or not to exclude asset model properties from the response.\n
", + "smithy.api#documentation": "Whether or not to exclude asset model properties from the response.
", "smithy.api#httpQuery": "excludeProperties" } } @@ -7224,7 +7234,7 @@ "assetModelCompositeModels": { "target": "com.amazonaws.iotsitewise#AssetModelCompositeModels", "traits": { - "smithy.api#documentation": "The list of built-in composite models for the asset model, such as those with those of type AWS/ALARMS
.
The list of built-in composite models for the asset model, such as those with those of\n type AWS/ALARMS
.
The list of the immediate child custom composite model summaries for the asset model.
" + "smithy.api#documentation": "The list of the immediate child custom composite model summaries for the asset\n model.
" } }, "assetModelExternalId": { @@ -7362,7 +7372,7 @@ "compositeModel": { "target": "com.amazonaws.iotsitewise#CompositeModelProperty", "traits": { - "smithy.api#documentation": "The composite model that declares this asset property, if this asset property exists\n in a composite model.
" + "smithy.api#documentation": "The composite model that declares this asset property, if this asset property exists in a\n composite model.
" } }, "assetExternalId": { @@ -7391,7 +7401,7 @@ "target": "com.amazonaws.iotsitewise#ExcludeProperties", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "\n Whether or not to exclude asset properties from the response.\n
", + "smithy.api#documentation": "Whether or not to exclude asset properties from the response.
", "smithy.api#httpQuery": "excludeProperties" } } @@ -7962,7 +7972,7 @@ } }, "gatewayName": { - "target": "com.amazonaws.iotsitewise#Name", + "target": "com.amazonaws.iotsitewise#GatewayName", "traits": { "smithy.api#documentation": "The name of the gateway.
", "smithy.api#required": {} @@ -8689,7 +8699,7 @@ "assetId": { "target": "com.amazonaws.iotsitewise#CustomID", "traits": { - "smithy.api#documentation": "The ID of the parent asset from which to disassociate the child asset. This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.
The ID of the parent asset from which to disassociate the child asset.\n This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.
The ID of a hierarchy in the parent asset's model. (This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) Hierarchies allow different groupings\n of assets to be formed that all come from the same asset model. You can use the hierarchy ID\n to identify the correct asset to disassociate. For more information, see\n Asset hierarchies in the IoT SiteWise User Guide.
The ID of a hierarchy in the parent asset's model. (This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) Hierarchies allow\n different groupings of assets to be formed that all come from the same asset model. You can\n use the hierarchy ID to identify the correct asset to disassociate. For more information, see\n Asset hierarchies in the IoT SiteWise User Guide.
Run SQL queries to retrieve metadata and time-series data from asset models, assets, measurements, metrics, transforms, and aggregates.
", + "smithy.api#documentation": "Run SQL queries to retrieve metadata and time-series data from asset models, assets,\n measurements, metrics, transforms, and aggregates.
", "smithy.api#endpoint": { "hostPrefix": "data." }, @@ -9299,6 +9309,16 @@ "smithy.api#documentation": "Contains a summary of a gateway capability configuration.
" } }, + "com.amazonaws.iotsitewise#GatewayName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[^\\u0000-\\u001F\\u007F]+$" + } + }, "com.amazonaws.iotsitewise#GatewayPlatform": { "type": "structure", "members": { @@ -9313,6 +9333,12 @@ "traits": { "smithy.api#documentation": "A gateway that runs on IoT Greengrass V2.
" } + }, + "siemensIE": { + "target": "com.amazonaws.iotsitewise#SiemensIE", + "traits": { + "smithy.api#documentation": "A SiteWise Edge gateway that runs on a Siemens Industrial Edge Device.
" + } } }, "traits": { @@ -9336,9 +9362,9 @@ } }, "gatewayName": { - "target": "com.amazonaws.iotsitewise#Name", + "target": "com.amazonaws.iotsitewise#GatewayName", "traits": { - "smithy.api#documentation": "The name of the asset.
", + "smithy.api#documentation": "The name of the gateway.
", "smithy.api#required": {} } }, @@ -9493,7 +9519,7 @@ "maxResults": { "target": "com.amazonaws.iotsitewise#GetAssetPropertyValueAggregatesMaxResults", "traits": { - "smithy.api#documentation": "The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.
\nThe size of the result set is equal to 1 MB.
\nThe number of data points in the result set is equal to the value of\n maxResults
. The maximum value of maxResults
is 2500.
The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.
\nThe size of the result set is equal to 1 MB.
\nThe number of data points in the result set is equal to the value of\n maxResults
. The maximum value of maxResults
is 2500.
The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.
\nThe size of the result set is equal to 4 MB.
\nThe number of data points in the result set is equal to the value of\n maxResults
. The maximum value of maxResults
is 20000.
The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs\n first.
\nThe size of the result set is equal to 4 MB.
\nThe number of data points in the result set is equal to the value of\n maxResults
. The maximum value of maxResults
is 20000.
The ARN of the Greengrass group. For more information about how to find a group's\n ARN, see ListGroups and GetGroup in the\n IoT Greengrass API Reference.
", + "smithy.api#documentation": "The ARN of the Greengrass group. For more information about how to find a group's\n ARN, see ListGroups and GetGroup in the IoT Greengrass V1\n API Reference.
", "smithy.api#required": {} } } @@ -9949,7 +9975,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains details for a gateway that runs on IoT Greengrass V2. To create a gateway that runs on IoT Greengrass\n V2, you must deploy the IoT SiteWise Edge component to your gateway device. Your Greengrass\n device role must use the AWSIoTSiteWiseEdgeAccess
policy. For more\n information, see Using IoT SiteWise at the edge in the\n IoT SiteWise User Guide.
Contains details for a gateway that runs on IoT Greengrass V2. To create a gateway that runs on IoT Greengrass V2,\n you must deploy the IoT SiteWise Edge component to your gateway device. Your Greengrass\n device role must use the AWSIoTSiteWiseEdgeAccess
policy. For more\n information, see Using IoT SiteWise at the edge in the\n IoT SiteWise User Guide.
Contains an identity that can access an IoT SiteWise Monitor resource.
\nCurrently, you can't use Amazon Web Services API operations to retrieve IAM Identity Center identity IDs. You can find the\n IAM Identity Center identity IDs in the URL of user and group pages in the IAM Identity Center console.
\nContains an identity that can access an IoT SiteWise Monitor resource.
\nCurrently, you can't use Amazon Web Services API operations to retrieve IAM Identity Center identity IDs. You can\n find the IAM Identity Center identity IDs in the URL of user and group pages in the IAM Identity Center console.
\nFilters the requested list of asset properties. You can choose one of the following\n options:
\n\n ALL
– The list includes all asset properties for a given asset\n model ID.
\n BASE
– The list includes only base asset properties for a given\n asset model ID.
Default: BASE
\n
Filters the requested list of asset properties. You can choose one of the following\n options:
\n\n ALL
– The list includes all asset properties for a given asset model ID.\n
\n BASE
– The list includes only base asset properties for a given asset\n model ID.
Default: BASE
\n
The ID of the hierarchy by which child assets are associated to the asset. (This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) To find a\n hierarchy ID, use the DescribeAsset or DescribeAssetModel operations. This\n parameter is required if you choose CHILD
for\n traversalDirection
.
For more information, see Asset hierarchies in the IoT SiteWise User Guide.
", + "smithy.api#documentation": "The ID of the hierarchy by which child assets are associated to the asset.\n (This can be either the actual ID in UUID format, or else externalId:
followed by the external ID, if it has one.\n For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) To find a hierarchy ID, use the DescribeAsset or DescribeAssetModel operations. This parameter is required if you choose\n CHILD
for traversalDirection
.
For more information, see Asset hierarchies in the IoT SiteWise User Guide.
", "smithy.api#httpQuery": "hierarchyId" } }, @@ -11476,7 +11512,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves a paginated list of composition relationships for an asset model of type COMPONENT_MODEL
.
Retrieves a paginated list of composition relationships for an asset model of type\n COMPONENT_MODEL
.
The name of the IoT Thing for your SiteWise Edge gateway.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Contains details for a SiteWise Edge gateway that runs on a Siemens Industrial Edge Device.
" + } + }, "com.amazonaws.iotsitewise#StorageType": { "type": "enum", "members": { @@ -14135,7 +14186,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an asset model and all of the assets that were created from the model. Each asset\n created from the model inherits the updated asset model's property and hierarchy definitions.\n For more information, see Updating assets and models in the\n IoT SiteWise User Guide.
\nThis operation overwrites the existing model with the provided model. To avoid deleting\n your asset model's properties or hierarchies, you must include their IDs and definitions in\n the updated asset model payload. For more information, see DescribeAssetModel.
\nIf you remove a property from an asset model, IoT SiteWise deletes all previous data for that\n property. If you remove a hierarchy definition from an asset model, IoT SiteWise disassociates every\n asset associated with that hierarchy. You can't change the type or data type of an existing\n property.
\nUpdates an asset model and all of the assets that were created from the model. Each asset\n created from the model inherits the updated asset model's property and hierarchy definitions.\n For more information, see Updating assets and models in the\n IoT SiteWise User Guide.
\nIf you remove a property from an asset model, IoT SiteWise deletes all previous data for that\n property. You can’t change the type or data type of an existing property.
\nTo replace an existing asset model property with a new one with the same\n name
, do the following:
Submit an UpdateAssetModel
request with the entire existing property\n removed.
Submit a second UpdateAssetModel
request that includes the new\n property. The new asset property will have the same name
as the previous\n one and IoT SiteWise will generate a new unique id
.
Updates a composite model and all of the assets that were created from the model. Each asset\n created from the model inherits the updated asset model's property and hierarchy definitions.\n For more information, see Updating assets and models in the\n IoT SiteWise User Guide.
\nIf you remove a property from a composite asset model, IoT SiteWise deletes all previous data for that property. You can’t change the type or data type of an existing property.
\nTo replace an existing composite asset model property with a new one with the same name
, do the following:
Submit an UpdateAssetModelCompositeModel
request with the entire existing property removed.
Submit a second UpdateAssetModelCompositeModel
request that includes the new property. The new asset property will have the same\n name
as the previous one and IoT SiteWise will generate a new unique id
.
Updates a composite model and all of the assets that were created from the model. Each\n asset created from the model inherits the updated asset model's property and hierarchy\n definitions. For more information, see Updating assets and models in the\n IoT SiteWise User Guide.
\nIf you remove a property from a composite asset model, IoT SiteWise deletes all previous data\n for that property. You can’t change the type or data type of an existing property.
\nTo replace an existing composite asset model property with a new one with the same\n name
, do the following:
Submit an UpdateAssetModelCompositeModel
request with the entire\n existing property removed.
Submit a second UpdateAssetModelCompositeModel
request that includes\n the new property. The new asset property will have the same name
as the\n previous one and IoT SiteWise will generate a new unique id
.
An external ID to assign to the asset model. You can only set the external ID of the asset model if it wasn't set when it was created, or you're setting it to \n the exact same thing as when it was created.
" + "smithy.api#documentation": "An external ID to assign to the asset model. You can only set the external ID of the asset\n model if it wasn't set when it was created, or you're setting it to the exact same thing as\n when it was created.
" } }, "assetModelCompositeModelDescription": { @@ -14223,7 +14274,7 @@ "assetModelCompositeModelName": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "A unique, friendly name for the composite model.
", + "smithy.api#documentation": "A unique name for the composite model.
", "smithy.api#required": {} } }, @@ -14237,7 +14288,7 @@ "assetModelCompositeModelProperties": { "target": "com.amazonaws.iotsitewise#AssetModelProperties", "traits": { - "smithy.api#documentation": "The property definitions of the composite model. For more information, see .
\nYou can specify up to 200 properties per composite model. For more\n information, see Quotas in the IoT SiteWise User Guide.
" + "smithy.api#documentation": "The property definitions of the composite model. For more information, see \n Inline custom composite models in the IoT SiteWise User Guide.
\nYou can specify up to 200 properties per composite model. For more\n information, see Quotas in the IoT SiteWise User Guide.
" } } }, @@ -14280,7 +14331,7 @@ "assetModelName": { "target": "com.amazonaws.iotsitewise#Name", "traits": { - "smithy.api#documentation": "A unique, friendly name for the asset model.
", + "smithy.api#documentation": "A unique name for the asset model.
", "smithy.api#required": {} } }, @@ -14305,7 +14356,7 @@ "assetModelCompositeModels": { "target": "com.amazonaws.iotsitewise#AssetModelCompositeModels", "traits": { - "smithy.api#documentation": "The composite models that are part of this asset model. It groups properties\n (such as attributes, measurements, transforms, and metrics) and child composite models that\n model parts of your industrial equipment. Each composite model has a type that defines the\n properties that the composite model supports. Use composite models to define alarms on this asset model.
\nWhen creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information,\n see .
\nThe composite models that are part of this asset model. It groups properties\n (such as attributes, measurements, transforms, and metrics) and child composite models that\n model parts of your industrial equipment. Each composite model has a type that defines the\n properties that the composite model supports. Use composite models to define alarms on this asset model.
\nWhen creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information,\n see Creating custom composite models (Components) in the\n IoT SiteWise User Guide.
\nAn external ID to assign to the asset model. The asset model must not already have an external ID. The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide.
" + "smithy.api#documentation": "An external ID to assign to the asset model. The asset model must not already have an\n external ID. The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide.
" } } }, @@ -14461,7 +14512,7 @@ "assetExternalId": { "target": "com.amazonaws.iotsitewise#ExternalId", "traits": { - "smithy.api#documentation": "An external ID to assign to the asset. The asset must not already have an external ID. The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide.
" + "smithy.api#documentation": "An external ID to assign to the asset. The asset must not already have an external ID.\n The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide.
" } } }, @@ -14709,9 +14760,9 @@ } }, "gatewayName": { - "target": "com.amazonaws.iotsitewise#Name", + "target": "com.amazonaws.iotsitewise#GatewayName", "traits": { - "smithy.api#documentation": "A unique, friendly name for the gateway.
", + "smithy.api#documentation": "A unique name for the gateway.
", "smithy.api#required": {} } } @@ -14968,13 +15019,13 @@ "propertyId": { "target": "com.amazonaws.iotsitewise#Macro", "traits": { - "smithy.api#documentation": "The ID of the property to use as the variable. You can use the property name
\n if it's from the same asset model. If the property has an external ID, you can specify\n externalId:
followed by the external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.
The ID of the property to use as the variable. You can use the property name
\n if it's from the same asset model. If the property has an external ID, you can specify\n externalId:
followed by the external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.
The ID of the hierarchy to query for the property ID. You can use the hierarchy's name\n instead of the hierarchy's ID. If the hierarchy has an external ID, you can specify\n externalId:
followed by the external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.
You use a hierarchy ID instead of a model ID because you can have several hierarchies\n using the same model and therefore the same propertyId
. For example, you might\n have separately grouped assets that come from the same asset model. For more information, see\n Asset hierarchies in the IoT SiteWise User Guide.
The ID of the hierarchy to query for the property ID. You can use the hierarchy's name\n instead of the hierarchy's ID. If the hierarchy has an external ID, you can specify\n externalId:
followed by the external ID. For more information, see Using external IDs in the IoT SiteWise User Guide.
You use a hierarchy ID instead of a model ID because you can have several hierarchies\n using the same model and therefore the same propertyId
. For example, you might\n have separately grouped assets that come from the same asset model. For more information, see\n Asset hierarchies in the IoT SiteWise User Guide.
Asset property data of type integer (number that's greater than or equal to zero).
" + "smithy.api#documentation": "Asset property data of type integer (whole number).
" } }, "doubleValue": { diff --git a/models/ivs-realtime.json b/models/ivs-realtime.json index 234a738b0e..f451a1b089 100644 --- a/models/ivs-realtime.json +++ b/models/ivs-realtime.json @@ -120,7 +120,7 @@ "sdkId": "IVS RealTime", "arnNamespace": "ivs", "cloudFormationName": "IVS", - "cloudTrailEventSource": "REPLACE_WITH_EVENT_SOURCE", + "cloudTrailEventSource": "ivs.amazonaws.com", "endpointPrefix": "ivsrealtime" }, "aws.auth#sigv4": { diff --git a/models/ivs.json b/models/ivs.json index 3f00894941..944a77b51a 100644 --- a/models/ivs.json +++ b/models/ivs.json @@ -132,7 +132,7 @@ "sdkId": "ivs", "arnNamespace": "ivs", "cloudFormationName": "IVS", - "cloudTrailEventSource": "REPLACE_WITH_EVENT_SOURCE" + "cloudTrailEventSource": "ivs.amazonaws.com" }, "aws.auth#sigv4": { "name": "ivs" @@ -157,7 +157,7 @@ "date" ] }, - "smithy.api#documentation": "\n Introduction\n
\nThe Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP\n API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both\n requests and responses, including errors.
\nThe API is an Amazon Web Services regional service. For a list of supported regions and\n Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the\n Amazon Web Services General Reference.
\n\n \n All API request parameters and URLs are case sensitive.\n \n \n
\nFor a summary of notable documentation changes in each release, see Document\n History.
\n\n Allowed Header Values\n
\n\n \n Accept:\n
application/json
\n \n Accept-Encoding:\n
gzip, deflate
\n \n Content-Type:\n
application/json
\n Key Concepts\n
\n\n Channel — Stores configuration data related to your live stream. You first create a channel and then use the channel’s stream key to start your live stream.
\n\n Stream key — An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. \n Treat the stream key like a secret, since it allows anyone to stream to the channel.\n \n
\n\n Playback key pair — Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token.
\n\n Recording configuration — Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration.
\n\n Playback restriction policy — Restricts playback by countries and/or origin sites.
\nFor more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming.
\n\n Tagging\n
\nA tag is a metadata label that you assign to an Amazon Web Services\n resource. A tag comprises a key and a value, both\n set by you. For example, you might set a tag as topic:nature
to label a\n particular video category. See Tagging Amazon Web Services Resources for\n more information, including restrictions that apply to tags and \"Tag naming limits and\n requirements\"; Amazon IVS has no service-specific constraints beyond what is documented\n there.
Tags can help you identify and organize your Amazon Web Services resources. For example,\n you can use the same tag for different resources to indicate that they are related. You can\n also use tags to manage access (see Access Tags).
\nThe Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following\n resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording\n Configurations.
\nAt most 50 tags can be applied to a resource.
\n\n Authentication versus Authorization\n
\nNote the differences between these concepts:
\n\n Authentication is about verifying identity. You need to be\n authenticated to sign Amazon IVS API requests.
\n\n Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition,\n authorization is needed to view Amazon IVS private channels.\n (Private channels are channels that are enabled for \"playback authorization.\")
\n\n Authentication\n
\nAll Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services\n Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying\n API calls for you. However, if your application calls the Amazon IVS API directly, it’s your\n responsibility to sign the requests.
\nYou generate a signature using valid Amazon Web Services credentials that have permission\n to perform the requested action. For example, you must sign PutMetadata requests with a\n signature generated from a user account that has the ivs:PutMetadata
\n permission.
For more information:
\nAuthentication and generating signatures — See Authenticating Requests\n (Amazon Web Services Signature Version 4) in the Amazon Web Services\n General Reference.
\nManaging Amazon IVS permissions — See Identity and Access Management on\n the Security page of the Amazon IVS User Guide.
\n\n Amazon Resource Names (ARNs)\n
\nARNs uniquely identify AWS resources. An ARN is required when you need to specify a\n resource unambiguously across all of AWS, such as in IAM policies and API\n calls. For more information, see Amazon\n Resource Names in the AWS General Reference.
\n\n Channel Endpoints\n
\n\n CreateChannel — Creates a new channel and an associated stream\n key to start streaming.
\n\n GetChannel — Gets the channel configuration for the specified\n channel ARN.
\n\n BatchGetChannel — Performs GetChannel on\n multiple ARNs simultaneously.
\n\n ListChannels — Gets summary information about all channels in\n your account, in the Amazon Web Services region where the API request is processed. This\n list can be filtered to match a specified name or recording-configuration ARN. Filters are\n mutually exclusive and cannot be used together. If you try to use both filters, you will\n get an error (409 Conflict Exception).
\n\n UpdateChannel — Updates a channel's configuration. This does\n not affect an ongoing stream of this channel. You must stop and restart the stream for the\n changes to take effect.
\n\n DeleteChannel — Deletes the specified channel.
\n\n Playback Restriction Policy Endpoints\n
\n\n CreatePlaybackRestrictionPolicy — Creates a new playback\n restriction policy, for constraining playback by countries and/or origins.
\n\n DeletePlaybackRestrictionPolicy — Deletes the specified\n playback restriction policy
\n\n GetPlaybackRestrictionPolicy — Gets the specified playback\n restriction policy.
\n\n ListPlaybackRestrictionPolicies — Gets summary information\n about playback restriction policies.
\n\n UpdatePlaybackRestrictionPolicy — Updates a specified playback\n restriction policy.
\n\n Private Channel Endpoints\n
\nFor more information, see Setting Up Private Channels in the\n Amazon IVS User Guide.
\n\n ImportPlaybackKeyPair — Imports the public portion of a new\n key pair and returns its arn
and fingerprint
. The\n privateKey
can then be used to generate viewer authorization tokens, to\n grant viewers access to private channels (channels enabled for playback\n authorization).
\n GetPlaybackKeyPair — Gets a specified playback authorization\n key pair and returns the arn
and fingerprint
. The\n privateKey
held by the caller can be used to generate viewer authorization\n tokens, to grant viewers access to private channels.
\n ListPlaybackKeyPairs — Gets summary information about playback\n key pairs.
\n\n DeletePlaybackKeyPair — Deletes a specified authorization key\n pair. This invalidates future viewer tokens generated using the key pair’s\n privateKey
.
\n StartViewerSessionRevocation — Starts the process of revoking\n the viewer session associated with a specified channel ARN and viewer ID. Optionally, you\n can provide a version to revoke viewer sessions less than and including that\n version.
\n\n BatchStartViewerSessionRevocation — Performs StartViewerSessionRevocation on multiple channel ARN and viewer ID pairs\n simultaneously.
\n\n Recording Configuration Endpoints\n
\n\n CreateRecordingConfiguration — Creates a new recording\n configuration, used to enable recording to Amazon S3.
\n\n GetRecordingConfiguration — Gets the recording-configuration\n metadata for the specified ARN.
\n\n ListRecordingConfigurations — Gets summary information about\n all recording configurations in your account, in the Amazon Web Services region where the\n API request is processed.
\n\n DeleteRecordingConfiguration — Deletes the recording\n configuration for the specified ARN.
\n\n Stream Endpoints\n
\n\n GetStream — Gets information about the active (live) stream on\n a specified channel.
\n\n GetStreamSession — Gets metadata on a specified stream.
\n\n ListStreams — Gets summary information about live streams in\n your account, in the Amazon Web Services region where the API request is processed.
\n\n ListStreamSessions — Gets a summary of current and previous\n streams for a specified channel in your account, in the AWS region where the API request\n is processed.
\n\n StopStream — Disconnects the incoming RTMPS stream for the\n specified channel. Can be used in conjunction with DeleteStreamKey to\n prevent further streaming to a channel.
\n\n PutMetadata — Inserts metadata into the active stream of the\n specified channel. At most 5 requests per second per channel are allowed, each with a\n maximum 1 KB payload. (If 5 TPS is not sufficient for your needs, we recommend batching\n your data into a single PutMetadata call.) At most 155 requests per second per account are\n allowed.
\n\n Stream Key Endpoints\n
\n\n CreateStreamKey — Creates a stream key, used to initiate a\n stream, for the specified channel ARN.
\n\n GetStreamKey — Gets stream key information for the specified\n ARN.
\n\n BatchGetStreamKey — Performs GetStreamKey on\n multiple ARNs simultaneously.
\n\n ListStreamKeys — Gets summary information about stream keys\n for the specified channel.
\n\n DeleteStreamKey — Deletes the stream key for the specified\n ARN, so it can no longer be used to stream.
\n\n Amazon Web Services Tags Endpoints\n
\n\n TagResource — Adds or updates tags for the Amazon Web Services\n resource with the specified ARN.
\n\n UntagResource — Removes tags from the resource with the\n specified ARN.
\n\n ListTagsForResource — Gets information about Amazon Web Services tags for the specified ARN.
\n\n Introduction\n
\nThe Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP\n API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both\n requests and responses, including errors.
\nThe API is an Amazon Web Services regional service. For a list of supported regions and\n Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the\n Amazon Web Services General Reference.
\n\n \n All API request parameters and URLs are case sensitive.\n \n \n
\nFor a summary of notable documentation changes in each release, see Document\n History.
\n\n Allowed Header Values\n
\n\n \n Accept:\n
application/json
\n \n Accept-Encoding:\n
gzip, deflate
\n \n Content-Type:\n
application/json
\n Key Concepts\n
\n\n Channel — Stores configuration data related to your live stream. You first create a channel and then use the channel’s stream key to start your live stream.
\n\n Stream key — An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. \n Treat the stream key like a secret, since it allows anyone to stream to the channel.\n \n
\n\n Playback key pair — Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token.
\n\n Recording configuration — Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration.
\n\n Playback restriction policy — Restricts playback by countries and/or origin sites.
\nFor more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming.
\n\n Tagging\n
\nA tag is a metadata label that you assign to an Amazon Web Services\n resource. A tag comprises a key and a value, both\n set by you. For example, you might set a tag as topic:nature
to label a\n particular video category. See Tagging Amazon Web Services Resources for\n more information, including restrictions that apply to tags and \"Tag naming limits and\n requirements\"; Amazon IVS has no service-specific constraints beyond what is documented\n there.
Tags can help you identify and organize your Amazon Web Services resources. For example,\n you can use the same tag for different resources to indicate that they are related. You can\n also use tags to manage access (see Access Tags).
\nThe Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following\n resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording\n Configurations.
\nAt most 50 tags can be applied to a resource.
\n\n Authentication versus Authorization\n
\nNote the differences between these concepts:
\n\n Authentication is about verifying identity. You need to be\n authenticated to sign Amazon IVS API requests.
\n\n Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition,\n authorization is needed to view Amazon IVS private channels.\n (Private channels are channels that are enabled for \"playback authorization.\")
\n\n Authentication\n
\nAll Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services\n Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying\n API calls for you. However, if your application calls the Amazon IVS API directly, it’s your\n responsibility to sign the requests.
\nYou generate a signature using valid Amazon Web Services credentials that have permission\n to perform the requested action. For example, you must sign PutMetadata requests with a\n signature generated from a user account that has the ivs:PutMetadata
\n permission.
For more information:
\nAuthentication and generating signatures — See Authenticating Requests\n (Amazon Web Services Signature Version 4) in the Amazon Web Services\n General Reference.
\nManaging Amazon IVS permissions — See Identity and Access Management on\n the Security page of the Amazon IVS User Guide.
\n\n Amazon Resource Names (ARNs)\n
\nARNs uniquely identify AWS resources. An ARN is required when you need to specify a\n resource unambiguously across all of AWS, such as in IAM policies and API\n calls. For more information, see Amazon\n Resource Names in the AWS General Reference.
", "smithy.api#title": "Amazon Interactive Video Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1331,7 +1331,7 @@ "min": 0, "max": 128 }, - "smithy.api#pattern": "^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$" + "smithy.api#pattern": "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$$" } }, "com.amazonaws.ivs#ChannelRecordingConfigurationArn": { @@ -1341,7 +1341,7 @@ "min": 0, "max": 128 }, - "smithy.api#pattern": "^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+$" + "smithy.api#pattern": "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+$$" } }, "com.amazonaws.ivs#ChannelSummary": { @@ -3701,26 +3701,32 @@ } }, "com.amazonaws.ivs#RenditionConfigurationRendition": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "FULL_HD", - "name": "FULL_HD" - }, - { - "value": "HD", - "name": "HD" - }, - { - "value": "SD", - "name": "SD" - }, - { - "value": "LOWEST_RESOLUTION", - "name": "LOWEST_RESOLUTION" + "type": "enum", + "members": { + "SD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SD" } - ] + }, + "HD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HD" + } + }, + "FULL_HD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FULL_HD" + } + }, + "LOWEST_RESOLUTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LOWEST_RESOLUTION" + } + } } }, "com.amazonaws.ivs#RenditionConfigurationRenditionList": { @@ -4537,26 +4543,32 @@ } }, "com.amazonaws.ivs#ThumbnailConfigurationResolution": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "FULL_HD", - "name": "FULL_HD" - }, - { - "value": "HD", - "name": "HD" - }, - { - "value": "SD", - "name": "SD" - }, - { - "value": "LOWEST_RESOLUTION", - "name": "LOWEST_RESOLUTION" + "type": "enum", + "members": { + "SD": { + "target": "smithy.api#Unit", + "traits": 
{ + "smithy.api#enumValue": "SD" } - ] + }, + "HD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HD" + } + }, + "FULL_HD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FULL_HD" + } + }, + "LOWEST_RESOLUTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LOWEST_RESOLUTION" + } + } } }, "com.amazonaws.ivs#ThumbnailConfigurationStorage": { diff --git a/models/ivschat.json b/models/ivschat.json index 38a532a2f3..584fd209f7 100644 --- a/models/ivschat.json +++ b/models/ivschat.json @@ -78,7 +78,7 @@ "sdkId": "ivschat", "arnNamespace": "ivschat", "cloudFormationName": "IVSChat", - "cloudTrailEventSource": "REPLACE_WITH_EVENT_SOURCE" + "cloudTrailEventSource": "ivschat.amazonaws.com" }, "aws.auth#sigv4": { "name": "ivschat" diff --git a/models/kinesis-video-webrtc-storage.json b/models/kinesis-video-webrtc-storage.json index f1080dd13c..7f9e198c02 100644 --- a/models/kinesis-video-webrtc-storage.json +++ b/models/kinesis-video-webrtc-storage.json @@ -7,6 +7,9 @@ "operations": [ { "target": "com.amazonaws.kinesisvideowebrtcstorage#JoinStorageSession" + }, + { + "target": "com.amazonaws.kinesisvideowebrtcstorage#JoinStorageSessionAsViewer" } ], "traits": { @@ -31,7 +34,7 @@ "X-Amz-User-Agent" ] }, - "smithy.api#documentation": "\n
", + "smithy.api#documentation": "\n
", "smithy.api#title": "Amazon Kinesis Video WebRTC Storage", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -75,7 +78,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -94,7 +96,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -122,13 +123,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -141,7 +143,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -155,7 +156,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -178,7 +178,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -213,11 +212,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -228,16 +225,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -251,14 +251,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -267,15 +265,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -286,16 +283,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -309,7 +309,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -329,11 +328,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -344,20 +341,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": 
"DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -368,18 +367,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] }, @@ -718,6 +721,16 @@ "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*):kinesisvideo:[a-z0-9-]+:[0-9]+:[a-z]+/[a-zA-Z0-9_.-]+/[0-9]+$" } }, + "com.amazonaws.kinesisvideowebrtcstorage#ClientId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9_.-]+$" + } + }, "com.amazonaws.kinesisvideowebrtcstorage#ClientLimitExceededException": { "type": "structure", "members": { @@ -767,7 +780,7 @@ } ], "traits": { - "smithy.api#documentation": "\n Join the ongoing one way-video and/or multi-way audio WebRTC session as \n a video producing device for an input channel. If there’s no existing \n session for the channel, a new streaming session needs to be created, and the\n Amazon Resource Name (ARN) of the signaling channel must be provided.\n
\nCurrently for the SINGLE_MASTER
type, a video producing\n device is able to ingest both audio and video media into a stream,\n while viewers can only ingest audio. Both a video producing device \n and viewers can join the session first, and wait for other participants.
While participants are having peer to peer conversations through webRTC, \n the ingested media session will be stored into the Kinesis Video Stream.\n Multiple viewers are able to playback real-time media.
\nCustomers can also use existing Kinesis Video Streams features like \n HLS
or DASH
playback, Image generation, and more\n with ingested WebRTC media.
Assume that only one video producing device client\n can be associated with a session for the channel. If more than one \n client joins the session of a specific channel as a video producing device,\n the most recent client request takes precedence.
\nBefore using this API, you must call the GetSignalingChannelEndpoint
API to request the WEBRTC endpoint. You then specify the endpoint and region in your JoinStorageSession
API request.
Join the ongoing one way-video and/or multi-way audio WebRTC session as a video producing\n device for an input channel. If there’s no existing session for the channel, a new streaming\n session needs to be created, and the Amazon Resource Name (ARN) of the signaling channel must\n be provided.
\nCurrently for the SINGLE_MASTER
type, a video producing\n device is able to ingest both audio and video media into a stream. Only video producing devices can join the session and record media.
Both audio and video tracks are currently required for WebRTC ingestion.
\nCurrent requirements:
\nVideo track: H.264
\nAudio track: Opus
\nThe resulting ingested video in the Kinesis video stream will have the following\n parameters: H.264 video and AAC audio.
\nOnce a master participant has negotiated a connection through WebRTC, the ingested media\n session will be stored in the Kinesis video stream. Multiple viewers are then able to play\n back real-time media through our Playback APIs.
\nYou can also use existing Kinesis Video Streams features like HLS
or\n DASH
playback, image generation via GetImages, and more\n with ingested WebRTC media.
S3 image delivery and notifications are not currently supported.
\nAssume that only one video producing device client\n can be associated with a session for the channel. If more than one \n client joins the session of a specific channel as a video producing device,\n the most recent client request takes precedence.
\n\n Additional information\n
\n\n Idempotent - This API is not idempotent.
\n\n Retry behavior - This is counted as a new API call.
\n\n Concurrent calls - Concurrent calls are allowed. An offer is sent once per each call.
\n\n Join the ongoing one way-video and/or multi-way audio WebRTC session as \n a viewer for an input channel. If there’s\n no existing session for the channel, create a new streaming session and provide\n the Amazon Resource Name (ARN) of the signaling channel (channelArn
)\n and client id (clientId
).\n
Currently for SINGLE_MASTER
type, a video producing device\n is able to ingest both audio and video media into a stream, while viewers\n can only ingest audio. Both a video producing device and viewers can join\n a session first and wait for other participants. While participants are having peer to peer conversations through WebRTC,\n the ingested media session will be stored into the Kinesis Video Stream.\n Multiple viewers are able to playback real-time media.\n
Customers can also use existing Kinesis Video Streams features like\n HLS
or DASH
playback, Image generation, and more \n with ingested WebRTC media. If there’s an existing session with the same\n clientId
that's found in the join session request, the new request takes precedence.
\n The Amazon Resource Name (ARN) of the signaling channel.\n
", + "smithy.api#required": {} + } + }, + "clientId": { + "target": "com.amazonaws.kinesisvideowebrtcstorage#ClientId", + "traits": { + "smithy.api#documentation": "\n The unique identifier for the sender client.\n
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.kinesisvideowebrtcstorage#JoinStorageSessionInput": { "type": "structure", "members": { diff --git a/models/kinesis.json b/models/kinesis.json index f03db12797..4176b38755 100644 --- a/models/kinesis.json +++ b/models/kinesis.json @@ -670,6 +670,21 @@ "value": "control" } }, + "smithy.test#smokeTests": [ + { + "id": "DescribeStreamFailure", + "params": { + "StreamName": "bogus-stream-name" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ], "smithy.waiters#waitable": { "StreamExists": { "acceptors": [ @@ -6069,7 +6084,20 @@ "inputToken": "NextToken", "outputToken": "NextToken", "pageSize": "Limit" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListStreamsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.kinesis#ListStreamsInput": { diff --git a/models/lex-models-v2.json b/models/lex-models-v2.json index 4bb6405418..12c84bb0a1 100644 --- a/models/lex-models-v2.json +++ b/models/lex-models-v2.json @@ -2629,6 +2629,44 @@ "smithy.api#output": {} } }, + "com.amazonaws.lexmodelsv2#BedrockGuardrailConfiguration": { + "type": "structure", + "members": { + "identifier": { + "target": "com.amazonaws.lexmodelsv2#BedrockGuardrailIdentifier", + "traits": { + "smithy.api#documentation": "The unique guardrail id for the Bedrock guardrail configuration.
", + "smithy.api#required": {} + } + }, + "version": { + "target": "com.amazonaws.lexmodelsv2#BedrockGuardrailVersion", + "traits": { + "smithy.api#documentation": "The guardrail version for the Bedrock guardrail configuration.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The details on the Bedrock guardrail configuration.
" + } + }, + "com.amazonaws.lexmodelsv2#BedrockGuardrailIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$" + } + }, + "com.amazonaws.lexmodelsv2#BedrockGuardrailVersion": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(([1-9][0-9]{0,7})|(DRAFT))$" + } + }, "com.amazonaws.lexmodelsv2#BedrockKnowledgeBaseArn": { "type": "string", "traits": { @@ -2645,21 +2683,57 @@ "bedrockKnowledgeBaseArn": { "target": "com.amazonaws.lexmodelsv2#BedrockKnowledgeBaseArn", "traits": { - "smithy.api#documentation": "The ARN of the knowledge base used.
", + "smithy.api#documentation": "The base ARN of the knowledge base used.
", "smithy.api#required": {} } + }, + "exactResponse": { + "target": "com.amazonaws.lexmodelsv2#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "Specifies whether to return an exact response, or to return an answer generated by the model, using the fields you specify from the database.
" + } + }, + "exactResponseFields": { + "target": "com.amazonaws.lexmodelsv2#BedrockKnowledgeStoreExactResponseFields", + "traits": { + "smithy.api#documentation": "Contains the names of the fields used for an exact response to the user.
" + } } }, "traits": { "smithy.api#documentation": "Contains details about the configuration of a Amazon Bedrock knowledge base.
" } }, + "com.amazonaws.lexmodelsv2#BedrockKnowledgeStoreExactResponseFields": { + "type": "structure", + "members": { + "answerField": { + "target": "com.amazonaws.lexmodelsv2#AnswerField", + "traits": { + "smithy.api#documentation": "The answer field used for an exact response from Bedrock Knowledge Store.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The exact response fields given by the Bedrock knowledge store.
" + } + }, "com.amazonaws.lexmodelsv2#BedrockModelArn": { "type": "string", "traits": { "smithy.api#pattern": "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model\\/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}$" } }, + "com.amazonaws.lexmodelsv2#BedrockModelCustomPrompt": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4000 + } + } + }, "com.amazonaws.lexmodelsv2#BedrockModelSpecification": { "type": "structure", "members": { @@ -2669,12 +2743,47 @@ "smithy.api#documentation": "The ARN of the foundation model used in descriptive bot building.
", "smithy.api#required": {} } + }, + "guardrail": { + "target": "com.amazonaws.lexmodelsv2#BedrockGuardrailConfiguration", + "traits": { + "smithy.api#documentation": "The guardrail configuration in the Bedrock model specification details.
" + } + }, + "traceStatus": { + "target": "com.amazonaws.lexmodelsv2#BedrockTraceStatus", + "traits": { + "smithy.api#documentation": "The Bedrock trace status in the Bedrock model specification details.
" + } + }, + "customPrompt": { + "target": "com.amazonaws.lexmodelsv2#BedrockModelCustomPrompt", + "traits": { + "smithy.api#documentation": "The custom prompt used in the Bedrock model specification details.
" + } } }, "traits": { "smithy.api#documentation": "Contains information about the Amazon Bedrock model used to interpret the prompt used in descriptive bot building.
" } }, + "com.amazonaws.lexmodelsv2#BedrockTraceStatus": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.lexmodelsv2#Boolean": { "type": "boolean", "traits": { @@ -6154,7 +6263,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds a new resource policy statement to a bot or bot alias. If a\n resource policy exists, the statement is added to the current resource\n policy. If a policy doesn't exist, a new policy is created.
\nYou can't create a resource policy statement that allows\n cross-account access.
", + "smithy.api#documentation": "Adds a new resource policy statement to a bot or bot alias. If a\n resource policy exists, the statement is added to the current resource\n policy. If a policy doesn't exist, a new policy is created.
\nYou can't create a resource policy statement that allows\n cross-account access.
\nYou need to add the CreateResourcePolicy
or UpdateResourcePolicy
\n action to the bot role in order to call the API.
Deletes a policy statement from a resource policy. If you delete the\n last statement from a policy, the policy is deleted. If you specify a\n statement ID that doesn't exist in the policy, or if the bot or bot\n alias doesn't have a policy attached, Amazon Lex returns an\n exception.
", + "smithy.api#documentation": "Deletes a policy statement from a resource policy. If you delete the\n last statement from a policy, the policy is deleted. If you specify a\n statement ID that doesn't exist in the policy, or if the bot or bot\n alias doesn't have a policy attached, Amazon Lex returns an\n exception.
\nYou need to add the DeleteResourcePolicy
or UpdateResourcePolicy
\n action to the bot role in order to call the API.
Setting this flag will force the CopyImageSet
operation, even if Patient, Study, or Series level\n metadata are mismatched across the sourceImageSet
and destinationImageSet
.
The latest version identifier for the source image set.
", "smithy.api#required": {} } + }, + "DICOMCopies": { + "target": "com.amazonaws.medicalimaging#MetadataCopies", + "traits": { + "smithy.api#documentation": "Contains MetadataCopies
structure and wraps information related to specific copy use cases.\n For example, when copying subsets.
The Amazon Resource Name (ARN) assigned to the image set.
" } + }, + "overrides": { + "target": "com.amazonaws.medicalimaging#Overrides", + "traits": { + "smithy.api#documentation": "This object contains the details of any overrides used while creating a specific image set version.\n If an image set was copied or updated using the force
flag, this object will contain the\n forced
flag.
The error message thrown if an image set action fails.
" } + }, + "overrides": { + "target": "com.amazonaws.medicalimaging#Overrides", + "traits": { + "smithy.api#documentation": "Contains details on overrides used when creating the returned version of an image set.\n For example, if forced
exists, the forced
flag was used when \n creating the image set.
The JSON string used to specify a subset of SOP Instances to copy from source to destination image set.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Contains copiable Attributes
structure and wraps information related to specific copy use cases.\n For example, when copying subsets.
The object containing removableAttributes
and updatableAttributes
.
Specifies the previous image set version ID to revert the current image set back to.
\nYou must provide either revertToVersionId
or DICOMUpdates
in your request. A \n ValidationException
error is thrown if both parameters are provided at the same time.
Setting this flag will force the CopyImageSet
and UpdateImageSetMetadata
\n operations, even if Patient, Study, or Series level metadata are mismatched.
Specifies the overrides used in image set modification calls to CopyImageSet
and \n UpdateImageSetMetadata
.
Setting this flag will force the UpdateImageSetMetadata
operation for the following attributes:
\n Tag.StudyInstanceUID
, Tag.SeriesInstanceUID
, Tag.SOPInstanceUID
, and Tag.StudyID
\n
Adding, removing, or updating private tags for an individual SOP Instance
\nMemoryDB for Redis is a fully managed, Redis-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures.\n \n MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis, a popular open source data store, enabling you to leverage Redis’ flexible and friendly data structures, APIs, and commands.
", + "smithy.api#documentation": "MemoryDB is a fully managed, Redis OSS-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures.\n \n MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis OSS, a popular open source data store, enabling you to leverage Redis OSS’ flexible and friendly data structures, APIs, and commands.
", "smithy.api#title": "Amazon MemoryDB", "smithy.api#xmlNamespace": { "uri": "http://memorydb.amazonaws.com/doc/2021-01-01/" @@ -409,7 +409,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -452,7 +451,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -465,7 +465,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -479,7 +478,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -502,7 +500,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -537,7 +534,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -548,14 +544,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -569,14 +567,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -585,11 +581,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -600,14 +596,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -621,7 +619,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -641,7 +638,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -652,14 +648,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -697,9 +695,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -1464,13 +1464,13 @@ "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": 
"The Redis engine version used by the cluster
" + "smithy.api#documentation": "The Redis OSS engine version used by the cluster
" } }, "EnginePatchVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "The Redis engine patch version used by the cluster
" + "smithy.api#documentation": "The Redis OSS engine patch version used by the cluster
" } }, "ParameterGroupName": { @@ -1609,7 +1609,7 @@ "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "The Redis engine version used by the cluster
" + "smithy.api#documentation": "The Redis OSS engine version used by the cluster
" } }, "MaintenanceWindow": { @@ -2111,7 +2111,7 @@ "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "The version number of the Redis engine to be used for the cluster.
" + "smithy.api#documentation": "The version number of the Redis OSS engine to be used for the cluster.
" } }, "AutoMinorVersionUpgrade": { @@ -2593,7 +2593,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a cluster. It also deletes all associated nodes and node endpoints
" + "smithy.api#documentation": "Deletes a cluster. It also deletes all associated nodes and node endpoints
\n\n CreateSnapshot
permission is required to create a final snapshot. \n Without this permission, the API call will fail with an Access Denied
exception.
Returns a list of the available Redis engine versions.
", + "smithy.api#documentation": "Returns a list of the available Redis OSS engine versions.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3042,7 +3042,7 @@ "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "The Redis engine version
" + "smithy.api#documentation": "The Redis OSS engine version
" } }, "ParameterGroupFamily": { @@ -3946,7 +3946,7 @@ } }, "traits": { - "smithy.api#documentation": "Provides details of the Redis engine version
" + "smithy.api#documentation": "Provides details of the Redis OSS engine version
" } }, "com.amazonaws.memorydb#EngineVersionInfoList": { diff --git a/models/mobile.json b/models/mobile.json deleted file mode 100644 index 86d8956971..0000000000 --- a/models/mobile.json +++ /dev/null @@ -1,1922 +0,0 @@ -{ - "smithy": "2.0", - "metadata": { - "suppressions": [ - { - "id": "HttpMethodSemantics", - "namespace": "*" - }, - { - "id": "HttpResponseCodeSemantics", - "namespace": "*" - }, - { - "id": "PaginatedTrait", - "namespace": "*" - }, - { - "id": "HttpHeaderTrait", - "namespace": "*" - }, - { - "id": "HttpUriConflict", - "namespace": "*" - }, - { - "id": "Service", - "namespace": "*" - } - ] - }, - "shapes": { - "com.amazonaws.mobile#AWSMobileService": { - "type": "service", - "version": "2017-07-01", - "operations": [ - { - "target": "com.amazonaws.mobile#CreateProject" - }, - { - "target": "com.amazonaws.mobile#DeleteProject" - }, - { - "target": "com.amazonaws.mobile#DescribeBundle" - }, - { - "target": "com.amazonaws.mobile#DescribeProject" - }, - { - "target": "com.amazonaws.mobile#ExportBundle" - }, - { - "target": "com.amazonaws.mobile#ExportProject" - }, - { - "target": "com.amazonaws.mobile#ListBundles" - }, - { - "target": "com.amazonaws.mobile#ListProjects" - }, - { - "target": "com.amazonaws.mobile#UpdateProject" - } - ], - "traits": { - "aws.api#service": { - "sdkId": "Mobile", - "arnNamespace": "awsmobilehubservice", - "cloudFormationName": "Mobile", - "cloudTrailEventSource": "mobile.amazonaws.com", - "endpointPrefix": "mobile" - }, - "aws.auth#sigv4": { - "name": "AWSMobileHubService" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "\n AWS Mobile Service provides mobile app and website developers with capabilities\n required to configure AWS resources and bootstrap their developer desktop projects\n with the necessary SDKs, constants, tools and samples to make use of those resources.\n
", - "smithy.api#title": "AWS Mobile", - "smithy.api#xmlNamespace": { - "uri": "http://mobile.amazonaws.com" - }, - "smithy.rules#endpointRuleSet": { - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": 
"aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mobile-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mobile-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - 
"ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mobile.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://mobile.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] - }, - "smithy.rules#endpointTests": { - "testCases": [ - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mobile.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://mobile-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mobile.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mobile.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://mobile.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": 
"DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mobile.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "Missing region", - "expect": { - "error": "Invalid Configuration: Missing Region" - } - } - ], - "version": "1.0" - } - } - }, - "com.amazonaws.mobile#AccountActionRequiredException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - 
"traits": { - "smithy.api#documentation": "\n Account Action is required in order to continue the request.\n
", - "smithy.api#error": "client", - "smithy.api#httpError": 403 - } - }, - "com.amazonaws.mobile#AttributeKey": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Key part of key-value attribute pairs.\n
" - } - }, - "com.amazonaws.mobile#AttributeValue": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Value part of key-value attribute pairs.\n
" - } - }, - "com.amazonaws.mobile#Attributes": { - "type": "map", - "key": { - "target": "com.amazonaws.mobile#AttributeKey" - }, - "value": { - "target": "com.amazonaws.mobile#AttributeValue" - }, - "traits": { - "smithy.api#documentation": "\n Key-value attribute pairs.\n
" - } - }, - "com.amazonaws.mobile#BadRequestException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "\n The request cannot be processed because some parameter is not valid or the project\n state prevents the operation from being performed.\n
", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.mobile#Boolean": { - "type": "boolean", - "traits": { - "smithy.api#default": false - } - }, - "com.amazonaws.mobile#BundleDescription": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Description of the download bundle.\n
" - } - }, - "com.amazonaws.mobile#BundleDetails": { - "type": "structure", - "members": { - "bundleId": { - "target": "com.amazonaws.mobile#BundleId" - }, - "title": { - "target": "com.amazonaws.mobile#BundleTitle" - }, - "version": { - "target": "com.amazonaws.mobile#BundleVersion" - }, - "description": { - "target": "com.amazonaws.mobile#BundleDescription" - }, - "iconUrl": { - "target": "com.amazonaws.mobile#IconUrl" - }, - "availablePlatforms": { - "target": "com.amazonaws.mobile#Platforms" - } - }, - "traits": { - "smithy.api#documentation": "\n The details of the bundle.\n
" - } - }, - "com.amazonaws.mobile#BundleId": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Unique bundle identifier.\n
" - } - }, - "com.amazonaws.mobile#BundleList": { - "type": "list", - "member": { - "target": "com.amazonaws.mobile#BundleDetails" - }, - "traits": { - "smithy.api#documentation": "\n A list of bundles.\n
" - } - }, - "com.amazonaws.mobile#BundleTitle": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Title of the download bundle.\n
" - } - }, - "com.amazonaws.mobile#BundleVersion": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Version of the download bundle.\n
" - } - }, - "com.amazonaws.mobile#ConsoleUrl": { - "type": "string" - }, - "com.amazonaws.mobile#Contents": { - "type": "blob", - "traits": { - "smithy.api#documentation": "\n Binary file data.\n
" - } - }, - "com.amazonaws.mobile#CreateProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#CreateProjectRequest" - }, - "output": { - "target": "com.amazonaws.mobile#CreateProjectResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#LimitExceededException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "\n Creates an AWS Mobile Hub project.\n
", - "smithy.api#http": { - "method": "POST", - "uri": "/projects", - "code": 200 - } - } - }, - "com.amazonaws.mobile#CreateProjectRequest": { - "type": "structure", - "members": { - "name": { - "target": "com.amazonaws.mobile#ProjectName", - "traits": { - "smithy.api#documentation": "\n Name of the project.\n
", - "smithy.api#httpQuery": "name" - } - }, - "region": { - "target": "com.amazonaws.mobile#ProjectRegion", - "traits": { - "smithy.api#documentation": "\n Default region where project resources should be created.\n
", - "smithy.api#httpQuery": "region" - } - }, - "contents": { - "target": "com.amazonaws.mobile#Contents", - "traits": { - "smithy.api#documentation": "\n ZIP or YAML file which contains configuration settings to be used when creating\n the project. This may be the contents of the file downloaded from the URL provided\n in an export project operation.\n
", - "smithy.api#httpPayload": {} - } - }, - "snapshotId": { - "target": "com.amazonaws.mobile#SnapshotId", - "traits": { - "smithy.api#documentation": "\n Unique identifier for an exported snapshot of project configuration. This\n snapshot identifier is included in the share URL when a project is exported.\n
", - "smithy.api#httpQuery": "snapshotId" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Request structure used to request a project be created.\n
", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#CreateProjectResult": { - "type": "structure", - "members": { - "details": { - "target": "com.amazonaws.mobile#ProjectDetails", - "traits": { - "smithy.api#documentation": "\n Detailed information about the created AWS Mobile Hub project.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Result structure used in response to a request to create a project.\n
", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#Date": { - "type": "timestamp" - }, - "com.amazonaws.mobile#DeleteProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#DeleteProjectRequest" - }, - "output": { - "target": "com.amazonaws.mobile#DeleteProjectResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "\n Delets a project in AWS Mobile Hub.\n
", - "smithy.api#http": { - "method": "DELETE", - "uri": "/projects/{projectId}", - "code": 200 - } - } - }, - "com.amazonaws.mobile#DeleteProjectRequest": { - "type": "structure", - "members": { - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "\n Unique project identifier.\n
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "\n Request structure used to request a project be deleted.\n
", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#DeleteProjectResult": { - "type": "structure", - "members": { - "deletedResources": { - "target": "com.amazonaws.mobile#Resources", - "traits": { - "smithy.api#documentation": "\n Resources which were deleted.\n
" - } - }, - "orphanedResources": { - "target": "com.amazonaws.mobile#Resources", - "traits": { - "smithy.api#documentation": "\n Resources which were not deleted, due to a risk of losing potentially\n important data or files.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Result structure used in response to request to delete a project.\n
", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#DescribeBundle": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#DescribeBundleRequest" - }, - "output": { - "target": "com.amazonaws.mobile#DescribeBundleResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "\n Get the bundle details for the requested bundle id.\n
", - "smithy.api#http": { - "method": "GET", - "uri": "/bundles/{bundleId}", - "code": 200 - } - } - }, - "com.amazonaws.mobile#DescribeBundleRequest": { - "type": "structure", - "members": { - "bundleId": { - "target": "com.amazonaws.mobile#BundleId", - "traits": { - "smithy.api#documentation": "\n Unique bundle identifier.\n
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "\n Request structure to request the details of a specific bundle.\n
", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#DescribeBundleResult": { - "type": "structure", - "members": { - "details": { - "target": "com.amazonaws.mobile#BundleDetails", - "traits": { - "smithy.api#documentation": "\n The details of the bundle.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Result structure contains the details of the bundle.\n
", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#DescribeProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#DescribeProjectRequest" - }, - "output": { - "target": "com.amazonaws.mobile#DescribeProjectResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "\n Gets details about a project in AWS Mobile Hub.\n
", - "smithy.api#http": { - "method": "GET", - "uri": "/project", - "code": 200 - } - } - }, - "com.amazonaws.mobile#DescribeProjectRequest": { - "type": "structure", - "members": { - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "\n Unique project identifier.\n
", - "smithy.api#httpQuery": "projectId", - "smithy.api#required": {} - } - }, - "syncFromResources": { - "target": "com.amazonaws.mobile#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n If set to true, causes AWS Mobile Hub to synchronize information from other services, e.g., update state of AWS CloudFormation stacks in the AWS Mobile Hub project.\n
", - "smithy.api#httpQuery": "syncFromResources" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Request structure used to request details about a project.\n
", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#DescribeProjectResult": { - "type": "structure", - "members": { - "details": { - "target": "com.amazonaws.mobile#ProjectDetails" - } - }, - "traits": { - "smithy.api#documentation": "\n Result structure used for requests of project details.\n
", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#DownloadUrl": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n The download Url.\n
" - } - }, - "com.amazonaws.mobile#ErrorMessage": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n The Exception Error Message.\n
" - } - }, - "com.amazonaws.mobile#ExportBundle": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#ExportBundleRequest" - }, - "output": { - "target": "com.amazonaws.mobile#ExportBundleResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "\n Generates customized software development kit (SDK) and or tool packages\n used to integrate mobile web or mobile app clients with backend AWS resources.\n
", - "smithy.api#http": { - "method": "POST", - "uri": "/bundles/{bundleId}", - "code": 200 - } - } - }, - "com.amazonaws.mobile#ExportBundleRequest": { - "type": "structure", - "members": { - "bundleId": { - "target": "com.amazonaws.mobile#BundleId", - "traits": { - "smithy.api#documentation": "\n Unique bundle identifier.\n
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "\n Unique project identifier.\n
", - "smithy.api#httpQuery": "projectId" - } - }, - "platform": { - "target": "com.amazonaws.mobile#Platform", - "traits": { - "smithy.api#documentation": "\n Developer desktop or target application platform.\n
", - "smithy.api#httpQuery": "platform" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Request structure used to request generation of custom SDK and tool packages\n required to integrate mobile web or app clients with backed AWS resources.\n
", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#ExportBundleResult": { - "type": "structure", - "members": { - "downloadUrl": { - "target": "com.amazonaws.mobile#DownloadUrl", - "traits": { - "smithy.api#documentation": "\n URL which contains the custom-generated SDK and tool packages used\n to integrate the client mobile app or web app with the AWS resources\n created by the AWS Mobile Hub project.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Result structure which contains link to download custom-generated SDK and\n tool packages used to integrate mobile web or app clients with backed\n AWS resources.\n
", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#ExportProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#ExportProjectRequest" - }, - "output": { - "target": "com.amazonaws.mobile#ExportProjectResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "\n Exports project configuration to a snapshot which can be downloaded and shared.\n Note that mobile app push credentials are encrypted in exported projects, so they\n can only be shared successfully within the same AWS account.\n
", - "smithy.api#http": { - "method": "POST", - "uri": "/exports/{projectId}", - "code": 200 - } - } - }, - "com.amazonaws.mobile#ExportProjectRequest": { - "type": "structure", - "members": { - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "\n Unique project identifier.\n
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "\n Request structure used in requests to export project configuration details.\n
", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#ExportProjectResult": { - "type": "structure", - "members": { - "downloadUrl": { - "target": "com.amazonaws.mobile#DownloadUrl", - "traits": { - "smithy.api#documentation": "\n URL which can be used to download the exported project configuation file(s).\n
" - } - }, - "shareUrl": { - "target": "com.amazonaws.mobile#ShareUrl", - "traits": { - "smithy.api#documentation": "\n URL which can be shared to allow other AWS users to create their own project\n in AWS Mobile Hub with the same configuration as the specified project. This\n URL pertains to a snapshot in time of the project configuration that is created\n when this API is called. If you want to share additional changes to your project\n configuration, then you will need to create and share a new snapshot by calling\n this method again.\n
" - } - }, - "snapshotId": { - "target": "com.amazonaws.mobile#SnapshotId", - "traits": { - "smithy.api#documentation": "\n Unique identifier for the exported snapshot of the project configuration. This\n snapshot identifier is included in the share URL.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Result structure used for requests to export project configuration details.\n
", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#Feature": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Identifies which feature in AWS Mobile Hub is associated with this AWS resource.\n
" - } - }, - "com.amazonaws.mobile#IconUrl": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Icon for the download bundle.\n
" - } - }, - "com.amazonaws.mobile#InternalFailureException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "\n The service has encountered an unexpected error condition which prevents it from\n servicing the request.\n
", - "smithy.api#error": "server", - "smithy.api#httpError": 500 - } - }, - "com.amazonaws.mobile#LimitExceededException": { - "type": "structure", - "members": { - "retryAfterSeconds": { - "target": "com.amazonaws.mobile#ErrorMessage", - "traits": { - "smithy.api#httpHeader": "Retry-After" - } - }, - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "\n There are too many AWS Mobile Hub projects in the account or the account has\n exceeded the maximum number of resources in some AWS service. You should create\n another sub-account using AWS Organizations or remove some resources and retry\n your request.\n
", - "smithy.api#error": "client", - "smithy.api#httpError": 429 - } - }, - "com.amazonaws.mobile#ListBundles": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#ListBundlesRequest" - }, - "output": { - "target": "com.amazonaws.mobile#ListBundlesResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "\n List all available bundles.\n
", - "smithy.api#http": { - "method": "GET", - "uri": "/bundles", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults" - } - } - }, - "com.amazonaws.mobile#ListBundlesRequest": { - "type": "structure", - "members": { - "maxResults": { - "target": "com.amazonaws.mobile#MaxResults", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n Maximum number of records to list in a single response.\n
", - "smithy.api#httpQuery": "maxResults" - } - }, - "nextToken": { - "target": "com.amazonaws.mobile#NextToken", - "traits": { - "smithy.api#documentation": "\n Pagination token. Set to null to start listing bundles from start.\n If non-null pagination token is returned in a result, then pass its\n value in here in another request to list more bundles.\n
", - "smithy.api#httpQuery": "nextToken" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Request structure to request all available bundles.\n
", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#ListBundlesResult": { - "type": "structure", - "members": { - "bundleList": { - "target": "com.amazonaws.mobile#BundleList", - "traits": { - "smithy.api#documentation": "\n A list of bundles.\n
" - } - }, - "nextToken": { - "target": "com.amazonaws.mobile#NextToken", - "traits": { - "smithy.api#documentation": "\n Pagination token. If non-null pagination token is returned in a result,\n then pass its value in another request to fetch more entries.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Result structure contains a list of all available bundles with details.\n
", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#ListProjects": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#ListProjectsRequest" - }, - "output": { - "target": "com.amazonaws.mobile#ListProjectsResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "\n Lists projects in AWS Mobile Hub.\n
", - "smithy.api#http": { - "method": "GET", - "uri": "/projects", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults" - } - } - }, - "com.amazonaws.mobile#ListProjectsRequest": { - "type": "structure", - "members": { - "maxResults": { - "target": "com.amazonaws.mobile#MaxResults", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n Maximum number of records to list in a single response.\n
", - "smithy.api#httpQuery": "maxResults" - } - }, - "nextToken": { - "target": "com.amazonaws.mobile#NextToken", - "traits": { - "smithy.api#documentation": "\n Pagination token. Set to null to start listing projects from start.\n If non-null pagination token is returned in a result, then pass its\n value in here in another request to list more projects.\n
", - "smithy.api#httpQuery": "nextToken" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Request structure used to request projects list in AWS Mobile Hub.\n
", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#ListProjectsResult": { - "type": "structure", - "members": { - "projects": { - "target": "com.amazonaws.mobile#ProjectSummaries" - }, - "nextToken": { - "target": "com.amazonaws.mobile#NextToken" - } - }, - "traits": { - "smithy.api#documentation": "\n Result structure used for requests to list projects in AWS Mobile Hub.\n
", - "smithy.api#output": {} - } - }, - "com.amazonaws.mobile#MaxResults": { - "type": "integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n Maximum number of records to list in a single response.\n
" - } - }, - "com.amazonaws.mobile#NextToken": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Pagination token. Set to null to start listing records from start.\n If non-null pagination token is returned in a result, then pass its\n value in here in another request to list more entries.\n
" - } - }, - "com.amazonaws.mobile#NotFoundException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "\n No entity can be found with the specified identifier.\n
", - "smithy.api#error": "client", - "smithy.api#httpError": 404 - } - }, - "com.amazonaws.mobile#Platform": { - "type": "enum", - "members": { - "OSX": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "OSX" - } - }, - "WINDOWS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "WINDOWS" - } - }, - "LINUX": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LINUX" - } - }, - "OBJC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "OBJC" - } - }, - "SWIFT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SWIFT" - } - }, - "ANDROID": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ANDROID" - } - }, - "JAVASCRIPT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "JAVASCRIPT" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Developer desktop or target mobile app or website platform.\n
" - } - }, - "com.amazonaws.mobile#Platforms": { - "type": "list", - "member": { - "target": "com.amazonaws.mobile#Platform" - }, - "traits": { - "smithy.api#documentation": "\n Developer desktop or mobile app or website platforms.\n
" - } - }, - "com.amazonaws.mobile#ProjectDetails": { - "type": "structure", - "members": { - "name": { - "target": "com.amazonaws.mobile#ProjectName" - }, - "projectId": { - "target": "com.amazonaws.mobile#ProjectId" - }, - "region": { - "target": "com.amazonaws.mobile#ProjectRegion" - }, - "state": { - "target": "com.amazonaws.mobile#ProjectState" - }, - "createdDate": { - "target": "com.amazonaws.mobile#Date", - "traits": { - "smithy.api#documentation": "\n Date the project was created.\n
" - } - }, - "lastUpdatedDate": { - "target": "com.amazonaws.mobile#Date", - "traits": { - "smithy.api#documentation": "\n Date of the last modification of the project.\n
" - } - }, - "consoleUrl": { - "target": "com.amazonaws.mobile#ConsoleUrl", - "traits": { - "smithy.api#documentation": "\n Website URL for this project in the AWS Mobile Hub console.\n
" - } - }, - "resources": { - "target": "com.amazonaws.mobile#Resources" - } - }, - "traits": { - "smithy.api#documentation": "\n Detailed information about an AWS Mobile Hub project.\n
" - } - }, - "com.amazonaws.mobile#ProjectId": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Unique project identifier.\n
" - } - }, - "com.amazonaws.mobile#ProjectName": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Name of the project.\n
" - } - }, - "com.amazonaws.mobile#ProjectRegion": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Default region to use for AWS resource creation in the AWS Mobile Hub project.\n
" - } - }, - "com.amazonaws.mobile#ProjectState": { - "type": "enum", - "members": { - "NORMAL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NORMAL" - } - }, - "SYNCING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SYNCING" - } - }, - "IMPORTING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IMPORTING" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Synchronization state for a project.\n
" - } - }, - "com.amazonaws.mobile#ProjectSummaries": { - "type": "list", - "member": { - "target": "com.amazonaws.mobile#ProjectSummary" - }, - "traits": { - "smithy.api#documentation": "\n List of projects.\n
" - } - }, - "com.amazonaws.mobile#ProjectSummary": { - "type": "structure", - "members": { - "name": { - "target": "com.amazonaws.mobile#ProjectName", - "traits": { - "smithy.api#documentation": "\n Name of the project.\n
" - } - }, - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "\n Unique project identifier.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Summary information about an AWS Mobile Hub project.\n
" - } - }, - "com.amazonaws.mobile#Resource": { - "type": "structure", - "members": { - "type": { - "target": "com.amazonaws.mobile#ResourceType" - }, - "name": { - "target": "com.amazonaws.mobile#ResourceName" - }, - "arn": { - "target": "com.amazonaws.mobile#ResourceArn" - }, - "feature": { - "target": "com.amazonaws.mobile#Feature" - }, - "attributes": { - "target": "com.amazonaws.mobile#Attributes" - } - }, - "traits": { - "smithy.api#documentation": "\n Information about an instance of an AWS resource associated with a project.\n
" - } - }, - "com.amazonaws.mobile#ResourceArn": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n AWS resource name which uniquely identifies the resource in AWS systems.\n
" - } - }, - "com.amazonaws.mobile#ResourceName": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Name of the AWS resource (e.g., for an Amazon S3 bucket this is the name of the bucket).\n
" - } - }, - "com.amazonaws.mobile#ResourceType": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Simplified name for type of AWS resource (e.g., bucket is an Amazon S3 bucket).\n
" - } - }, - "com.amazonaws.mobile#Resources": { - "type": "list", - "member": { - "target": "com.amazonaws.mobile#Resource" - }, - "traits": { - "smithy.api#documentation": "\n List of AWS resources associated with a project.\n
" - } - }, - "com.amazonaws.mobile#ServiceUnavailableException": { - "type": "structure", - "members": { - "retryAfterSeconds": { - "target": "com.amazonaws.mobile#ErrorMessage", - "traits": { - "smithy.api#httpHeader": "Retry-After" - } - }, - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "\n The service is temporarily unavailable. The request should be retried after some\n time delay.\n
", - "smithy.api#error": "server", - "smithy.api#httpError": 503 - } - }, - "com.amazonaws.mobile#ShareUrl": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n URL which can be shared to allow other AWS users to create their own project\n in AWS Mobile Hub with the same configuration as the specified project. This\n URL pertains to a snapshot in time of the project configuration that is created\n when this API is called. If you want to share additional changes to your project\n configuration, then you will need to create and share a new snapshot by calling\n this method again.\n
" - } - }, - "com.amazonaws.mobile#SnapshotId": { - "type": "string", - "traits": { - "smithy.api#documentation": "\n Unique identifier for the exported snapshot of the project configuration. This\n snapshot identifier is included in the share URL.\n
" - } - }, - "com.amazonaws.mobile#TooManyRequestsException": { - "type": "structure", - "members": { - "retryAfterSeconds": { - "target": "com.amazonaws.mobile#ErrorMessage", - "traits": { - "smithy.api#httpHeader": "Retry-After" - } - }, - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "\n Too many requests have been received for this AWS account in too short a time. The\n request should be retried after some time delay.\n
", - "smithy.api#error": "client", - "smithy.api#httpError": 429 - } - }, - "com.amazonaws.mobile#UnauthorizedException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.mobile#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "\n Credentials of the caller are insufficient to authorize the request.\n
", - "smithy.api#error": "client", - "smithy.api#httpError": 401 - } - }, - "com.amazonaws.mobile#UpdateProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.mobile#UpdateProjectRequest" - }, - "output": { - "target": "com.amazonaws.mobile#UpdateProjectResult" - }, - "errors": [ - { - "target": "com.amazonaws.mobile#AccountActionRequiredException" - }, - { - "target": "com.amazonaws.mobile#BadRequestException" - }, - { - "target": "com.amazonaws.mobile#InternalFailureException" - }, - { - "target": "com.amazonaws.mobile#LimitExceededException" - }, - { - "target": "com.amazonaws.mobile#NotFoundException" - }, - { - "target": "com.amazonaws.mobile#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.mobile#TooManyRequestsException" - }, - { - "target": "com.amazonaws.mobile#UnauthorizedException" - } - ], - "traits": { - "smithy.api#documentation": "\n Update an existing project.\n
", - "smithy.api#http": { - "method": "POST", - "uri": "/update", - "code": 200 - } - } - }, - "com.amazonaws.mobile#UpdateProjectRequest": { - "type": "structure", - "members": { - "contents": { - "target": "com.amazonaws.mobile#Contents", - "traits": { - "smithy.api#documentation": "\n ZIP or YAML file which contains project configuration to be updated. This should\n be the contents of the file downloaded from the URL provided in an export project\n operation.\n
", - "smithy.api#httpPayload": {} - } - }, - "projectId": { - "target": "com.amazonaws.mobile#ProjectId", - "traits": { - "smithy.api#documentation": "\n Unique project identifier.\n
", - "smithy.api#httpQuery": "projectId", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "\n Request structure used for requests to update project configuration.\n
", - "smithy.api#input": {} - } - }, - "com.amazonaws.mobile#UpdateProjectResult": { - "type": "structure", - "members": { - "details": { - "target": "com.amazonaws.mobile#ProjectDetails", - "traits": { - "smithy.api#documentation": "\n Detailed information about the updated AWS Mobile Hub project.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Result structure used for requests to updated project configuration.\n
", - "smithy.api#output": {} - } - } - } -} diff --git a/models/neptune-graph.json b/models/neptune-graph.json index effea5b89e..03a6260575 100644 --- a/models/neptune-graph.json +++ b/models/neptune-graph.json @@ -1534,6 +1534,17 @@ "smithy.api#pattern": "^arn:.+$" } }, + "com.amazonaws.neptunegraph#BlankNodeHandling": { + "type": "enum", + "members": { + "CONVERT_TO_IRI": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "convertToIri" + } + } + } + }, "com.amazonaws.neptunegraph#CancelImportTask": { "type": "operation", "input": { @@ -2187,6 +2198,12 @@ "smithy.api#documentation": "Specifies the format of S3 data to be imported. Valid values are CSV
, which identifies\n the Gremlin\n CSV format or OPENCYPHER
, which identies the openCypher\n load format.
The method to handle blank nodes in the dataset. Currently, only convertToIri
is supported, \n meaning blank nodes are converted to unique IRIs at load time. Must be provided when format is ntriples
. \n For more information, see Handling RDF values.
Specifies the format of S3 data to be imported. Valid values are CSV
, which identifies\n the Gremlin\n CSV format or OPENCYPHER
, which identies the openCypher\n load format.
Specifies the format of S3 data to be imported. Valid values are CSV
, which identifies\n the Gremlin\n CSV format, OPENCYPHER
, which identifies the openCypher\n load format, or ntriples
, which identifies the\n RDF n-triples format.
Specifies the format of Amazon S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format or \n OPENCYPHER, which identies the openCypher load format.
" } }, + "blankNodeHandling": { + "target": "com.amazonaws.neptunegraph#BlankNodeHandling", + "traits": { + "smithy.api#documentation": "The method to handle blank nodes in the dataset. Currently, only convertToIri
is supported, \n meaning blank nodes are converted to unique IRIs at load time. Must be provided when format is ntriples
. \n For more information, see Handling RDF values.
The maximum operating resources that this rule group can use. Rule group capacity is fixed at creation.\n When you update a rule group, you are limited to this capacity. When you reference a rule group\n from a firewall policy, Network Firewall reserves this capacity for the rule group.
\nYou can retrieve the capacity that would be required for a rule group before you create the rule group by calling\n CreateRuleGroup with DryRun
set to TRUE
.
You can't change or exceed this capacity when you update the rule group, so leave\n room for your rule group to grow.
\n\n Capacity for a stateless rule group\n
\nFor a stateless rule group, the capacity required is the sum of the capacity\n requirements of the individual rules that you expect to have in the rule group.
\nTo calculate the capacity requirement of a single rule, multiply the capacity\n requirement values of each of the rule's match settings:
\nA match setting with no criteria specified has a value of 1.
\nA match setting with Any
specified has a value of 1.
All other match settings have a value equal to the number of elements provided in\n the setting. For example, a protocol setting [\"UDP\"] and a source setting\n [\"10.0.0.0/24\"] each have a value of 1. A protocol setting [\"UDP\",\"TCP\"] has a value\n of 2. A source setting [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"] has a value of 3.\n
\nA rule with no criteria specified in any of its match settings has a capacity\n requirement of 1. A rule with protocol setting [\"UDP\",\"TCP\"], source setting\n [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"], and a single specification or no specification\n for each of the other match settings has a capacity requirement of 6.
\n\n Capacity for a stateful rule group\n
\nFor\n a stateful rule group, the minimum capacity required is the number of individual rules that\n you expect to have in the rule group.
", + "smithy.api#documentation": "The maximum operating resources that this rule group can use. Rule group capacity is fixed at creation.\n When you update a rule group, you are limited to this capacity. When you reference a rule group\n from a firewall policy, Network Firewall reserves this capacity for the rule group.
\nYou can retrieve the capacity that would be required for a rule group before you create the rule group by calling\n CreateRuleGroup with DryRun
set to TRUE
.
You can't change or exceed this capacity when you update the rule group, so leave\n room for your rule group to grow.
\n\n Capacity for a stateless rule group\n
\nFor a stateless rule group, the capacity required is the sum of the capacity\n requirements of the individual rules that you expect to have in the rule group.
\nTo calculate the capacity requirement of a single rule, multiply the capacity\n requirement values of each of the rule's match settings:
\nA match setting with no criteria specified has a value of 1.
\nA match setting with Any
specified has a value of 1.
All other match settings have a value equal to the number of elements provided in\n the setting. For example, a protocol setting [\"UDP\"] and a source setting\n [\"10.0.0.0/24\"] each have a value of 1. A protocol setting [\"UDP\",\"TCP\"] has a value\n of 2. A source setting [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"] has a value of 3.\n
\nA rule with no criteria specified in any of its match settings has a capacity\n requirement of 1. A rule with protocol setting [\"UDP\",\"TCP\"], source setting\n [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"], and a single specification or no specification\n for each of the other match settings has a capacity requirement of 6.
\n\n Capacity for a stateful rule group\n
\nFor a stateful rule group, the minimum capacity required is the number of individual rules that\n you expect to have in the rule group.
", "smithy.api#required": {} } }, @@ -893,7 +893,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Network Firewall TLS inspection configuration. A TLS inspection configuration contains Certificate Manager certificate associations between and the scope configurations that Network Firewall uses to decrypt and re-encrypt traffic traveling through your firewall.
\nAfter you create a TLS inspection configuration, you can associate it with a new firewall policy.
\nTo update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.
\nTo manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.
\nTo retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.
\n\n For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS\ninspection configurations in the Network Firewall Developer Guide.\n
" + "smithy.api#documentation": "Creates an Network Firewall TLS inspection configuration. Network Firewall uses TLS inspection configurations to decrypt your firewall's inbound and outbound SSL/TLS traffic. After decryption, Network Firewall inspects the traffic according to your firewall policy's stateful rules, and then re-encrypts it before sending it to its destination. You can enable inspection of your firewall's inbound traffic, outbound traffic, or both. To use TLS inspection with your firewall, you must first import or provision certificates using ACM, create a TLS inspection configuration, add that configuration to a new firewall policy, and then associate that policy with your firewall.
\nTo update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.
\nTo manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.
\nTo retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.
\n\n For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS\ninspection configurations in the Network Firewall Developer Guide.\n
" } }, "com.amazonaws.networkfirewall#CreateTLSInspectionConfigurationRequest": { @@ -3073,27 +3073,27 @@ "LogType": { "target": "com.amazonaws.networkfirewall#LogType", "traits": { - "smithy.api#documentation": "The type of log to send. Alert logs report traffic that matches a StatefulRule with an action setting that sends an alert log message. Flow logs are\n standard network traffic flow logs.
", + "smithy.api#documentation": "The type of log to record. You can record the following types of logs from your Network Firewall stateful engine.
\n\n ALERT
- Logs for traffic that matches your stateful rules and that have an action that sends an alert. A stateful rule sends alerts for the rule actions DROP, ALERT, and REJECT. For more information, see StatefulRule.
\n FLOW
- Standard network traffic flow logs. The stateful rules engine records flow logs for all network traffic that it receives. Each flow log record captures the network flow for a specific standard stateless rule group.
\n TLS
- Logs for events that are related to TLS inspection. For more information, see \n Inspecting SSL/TLS traffic with TLS inspection configurations \n in the Network Firewall Developer Guide.
The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket,\n a CloudWatch log group, or a Kinesis Data Firehose delivery stream.
", + "smithy.api#documentation": "The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket,\n a CloudWatch log group, or a Firehose delivery stream.
", "smithy.api#required": {} } }, "LogDestination": { "target": "com.amazonaws.networkfirewall#LogDestinationMap", "traits": { - "smithy.api#documentation": "The named location for the logs, provided in a key:value mapping that is specific to the\n chosen destination type.
\nFor an Amazon S3 bucket, provide the name of the bucket, with key bucketName
,\n and optionally provide a prefix, with key prefix
. The following example\n specifies an Amazon S3 bucket named\n DOC-EXAMPLE-BUCKET
and the prefix alerts
:
\n \"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\"\n }
\n
For a CloudWatch log group, provide the name of the CloudWatch log group, with key\n logGroup
. The following example specifies a log group named\n alert-log-group
:
\n \"LogDestination\": { \"logGroup\": \"alert-log-group\" }
\n
For a Kinesis Data Firehose delivery stream, provide the name of the delivery stream, with key\n deliveryStream
. The following example specifies a delivery stream\n named alert-delivery-stream
:
\n \"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\"\n }
\n
The named location for the logs, provided in a key:value mapping that is specific to the\n chosen destination type.
\nFor an Amazon S3 bucket, provide the name of the bucket, with key bucketName
,\n and optionally provide a prefix, with key prefix
.
The following example specifies an Amazon S3 bucket named DOC-EXAMPLE-BUCKET
and the prefix alerts
:
\n \"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\"\n }
\n
For a CloudWatch log group, provide the name of the CloudWatch log group, with key\n logGroup
. The following example specifies a log group named\n alert-log-group
:
\n \"LogDestination\": { \"logGroup\": \"alert-log-group\" }
\n
For a Firehose delivery stream, provide the name of the delivery stream, with key\n deliveryStream
. The following example specifies a delivery stream\n named alert-delivery-stream
:
\n \"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\"\n }
\n
Defines where Network Firewall sends logs for the firewall for one log type. This is used\n in LoggingConfiguration. You can send each type of log to an Amazon S3 bucket, a CloudWatch log group, or a Kinesis Data Firehose delivery stream.
\nNetwork Firewall generates logs for stateful rule groups. You can save alert and flow log\n types. The stateful rules engine records flow logs for all network traffic that it receives.\n It records alert logs for traffic that matches stateful rules that have the rule\n action set to DROP
or ALERT
.
Defines where Network Firewall sends logs for the firewall for one log type. This is used\n in LoggingConfiguration. You can send each type of log to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream.
\nNetwork Firewall generates logs for stateful rule groups. You can save alert, flow, and TLS log\n types.
" } }, "com.amazonaws.networkfirewall#LogDestinationConfigs": { @@ -3167,6 +3167,12 @@ "traits": { "smithy.api#enumValue": "FLOW" } + }, + "TLS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TLS" + } } } }, @@ -5089,7 +5095,7 @@ } }, "traits": { - "smithy.api#documentation": "Stateful inspection criteria for a domain list rule group.
\nFor HTTPS traffic, domain filtering is SNI-based. It uses the server name indicator extension of the TLS handshake.
\nBy default, Network Firewall domain list inspection only includes traffic coming from the VPC where you deploy the firewall. To inspect traffic from IP addresses outside of the deployment VPC, you set the HOME_NET
rule variable to include the CIDR range of the deployment VPC plus the other CIDR ranges. For more information, see RuleVariables in this guide and Stateful domain list rule groups in Network Firewall in the Network Firewall Developer Guide.
Stateful inspection criteria for a domain list rule group.
\nFor HTTPS traffic, domain filtering is SNI-based. It uses the server name indicator extension of the TLS handshake.
\nBy default, Network Firewall domain list inspection only includes traffic coming from the VPC where you deploy the firewall. To inspect traffic from IP addresses outside of the deployment VPC, you set the HOME_NET
rule variable to include the CIDR range of the deployment VPC plus the other CIDR ranges. For more information, see RuleVariables in this guide and \n Stateful domain list rule groups in Network Firewall in the Network Firewall Developer Guide.
Defines what Network Firewall should do with the packets in a traffic flow when the flow\n matches the stateful rule criteria. For all actions, Network Firewall performs the specified\n action and discontinues stateful inspection of the traffic flow.
\nThe actions for a stateful rule are defined as follows:
\n\n PASS - Permits the packets to go to the\n intended destination.
\n\n DROP - Blocks the packets from going to\n the intended destination and sends an alert log message, if alert logging is configured in the Firewall\n LoggingConfiguration.
\n\n ALERT - Sends an alert log message, if alert logging is configured in the Firewall\n LoggingConfiguration.
\nYou can use this action to test a rule that you intend to use to drop traffic. You\n can enable the rule with ALERT
action, verify in the logs that the rule\n is filtering as you want, then change the action to DROP
.
Defines what Network Firewall should do with the packets in a traffic flow when the flow\n matches the stateful rule criteria. For all actions, Network Firewall performs the specified\n action and discontinues stateful inspection of the traffic flow.
\nThe actions for a stateful rule are defined as follows:
\n\n PASS - Permits the packets to go to the\n intended destination.
\n\n DROP - Blocks the packets from going to\n the intended destination and sends an alert log message, if alert logging is configured in the Firewall\n LoggingConfiguration.
\n\n ALERT - Sends an alert log message, if alert logging is configured in the Firewall\n LoggingConfiguration.
\nYou can use this action to test a rule that you intend to use to drop traffic. You\n can enable the rule with ALERT
action, verify in the logs that the rule\n is filtering as you want, then change the action to DROP
.
\n REJECT - Drops traffic that matches the conditions of the stateful rule, and sends a TCP reset packet back to sender of the packet. A TCP reset packet is a packet with no payload and an RST bit contained in the TCP header flags. REJECT is available only for TCP traffic. This option doesn't support FTP or IMAP protocols.
\nThe number of default VCPUs in an instance type.
" + } } }, "traits": { @@ -6019,6 +6025,9 @@ } } }, + "com.amazonaws.outposts#VCPUCount": { + "type": "integer" + }, "com.amazonaws.outposts#ValidationException": { "type": "structure", "members": { diff --git a/models/pi.json b/models/pi.json index fc5fe125bc..379ea8093d 100644 --- a/models/pi.json +++ b/models/pi.json @@ -665,7 +665,7 @@ "Dimensions": { "target": "com.amazonaws.pi#SanitizedStringList", "traits": { - "smithy.api#documentation": "A list of specific dimensions from a dimension group. If this parameter is not present,\n then it signifies that all of the dimensions in the group were requested, or are present in\n the response.
\nValid values for elements in the Dimensions
array are:
\n db.application.name
- The name of the application that is connected to the database. Valid values are as follows:
Aurora PostgreSQL
\nAmazon RDS PostgreSQL
\nAmazon DocumentDB
\n\n db.host.id
- The host ID of the connected client (all engines).
\n db.host.name
- The host name of the connected client (all engines).
\n db.name
- The name of the database to which the client is connected. Valid values are as follows:
Aurora PostgreSQL
\nAmazon RDS PostgreSQL
\nAurora MySQL
\nAmazon RDS MySQL
\nAmazon RDS MariaDB
\nAmazon DocumentDB
\n\n db.query.id
- The query ID generated by Performance Insights (only Amazon DocumentDB).
\n db.query.db_id
- The query ID generated by the database (only Amazon DocumentDB).
\n db.query.statement
- The text of the query that is being run (only Amazon DocumentDB).
\n db.query.tokenized_id
\n
\n db.query.tokenized.id
- The query digest ID generated by Performance Insights (only Amazon DocumentDB).
\n db.query.tokenized.db_id
- The query digest ID generated by Performance Insights (only Amazon DocumentDB).
\n db.query.tokenized.statement
- The text of the query digest (only Amazon DocumentDB).
\n db.session_type.name
- The type of the current session (only Amazon DocumentDB).
\n db.sql.id
- The hash of the full, non-tokenized SQL statement generated by Performance Insights (all engines except Amazon DocumentDB).
\n db.sql.db_id
- Either the SQL ID generated by the database engine, or a value generated by Performance Insights that begins with\n pi-
(all engines except Amazon DocumentDB).
\n db.sql.statement
- The full text of the SQL statement that is running, as in SELECT * FROM employees
\n (all engines except Amazon DocumentDB)
\n db.sql.tokenized_id
\n
\n db.sql_tokenized.id
- The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). In the console,\n db.sql_tokenized.id
is called the Support ID because Amazon Web Services Support can look at this data to help you troubleshoot\n database issues.
\n db.sql_tokenized.db_id
- Either the native database ID used to refer to the SQL statement, or a synthetic ID such as\n pi-2372568224
that Performance Insights generates if the native database ID isn't available (all engines except Amazon DocumentDB).
\n db.sql_tokenized.statement
- The text of the SQL digest, as in SELECT * FROM employees WHERE employee_id =\n ?
(all engines except Amazon DocumentDB)
\n db.user.id
- The ID of the user logged in to the database (all engines except Amazon DocumentDB).
\n db.user.name
- The name of the user logged in to the database (all engines except Amazon DocumentDB).
\n db.wait_event.name
- The event for which the backend is waiting (all engines except Amazon DocumentDB).
\n db.wait_event.type
- The type of event for which the backend is waiting (all engines except Amazon DocumentDB).
\n db.wait_event_type.name
- The name of the event type for which the backend is waiting (all engines except\n Amazon DocumentDB).
\n db.wait_state.name
- The event for which the backend is waiting (only Amazon DocumentDB).
A list of specific dimensions from a dimension group. If this parameter is not present,\n then it signifies that all of the dimensions in the group were requested, or are present in\n the response.
\nValid values for elements in the Dimensions
array are:
\n db.application.name
- The name of the application that is connected to the database. Valid values are as follows:
Aurora PostgreSQL
\nAmazon RDS PostgreSQL
\nAmazon DocumentDB
\n\n db.host.id
- The host ID of the connected client (all engines).
\n db.host.name
- The host name of the connected client (all engines).
\n db.name
- The name of the database to which the client is connected. Valid values are as follows:
Aurora PostgreSQL
\nAmazon RDS PostgreSQL
\nAurora MySQL
\nAmazon RDS MySQL
\nAmazon RDS MariaDB
\nAmazon DocumentDB
\n\n db.query.id
- The query ID generated by Performance Insights (only Amazon DocumentDB).
\n db.query.db_id
- The query ID generated by the database (only Amazon DocumentDB).
\n db.query.statement
- The text of the query that is being run (only Amazon DocumentDB).
\n db.query.tokenized_id
\n
\n db.query.tokenized.id
- The query digest ID generated by Performance Insights (only Amazon DocumentDB).
\n db.query.tokenized.db_id
- The query digest ID generated by Performance Insights (only Amazon DocumentDB).
\n db.query.tokenized.statement
- The text of the query digest (only Amazon DocumentDB).
\n db.session_type.name
- The type of the current session (only Amazon DocumentDB).
\n db.sql.id
- The hash of the full, non-tokenized SQL statement generated by Performance Insights (all engines except Amazon DocumentDB).
\n db.sql.db_id
- Either the SQL ID generated by the database engine, or a value generated by Performance Insights that begins with\n pi-
(all engines except Amazon DocumentDB).
\n db.sql.statement
- The full text of the SQL statement that is running, as in SELECT * FROM employees
\n (all engines except Amazon DocumentDB)
\n db.sql.tokenized_id
- The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). The db.sql.tokenized_id
dimension \n fetches the value of the db.sql_tokenized.id
dimension. Amazon RDS returns db.sql.tokenized_id
from the db.sql
dimension group.\n
\n db.sql_tokenized.id
- The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). In the console,\n db.sql_tokenized.id
is called the Support ID because Amazon Web Services Support can look at this data to help you troubleshoot\n database issues.
\n db.sql_tokenized.db_id
- Either the native database ID used to refer to the SQL statement, or a synthetic ID such as\n pi-2372568224
that Performance Insights generates if the native database ID isn't available (all engines except Amazon DocumentDB).
\n db.sql_tokenized.statement
- The text of the SQL digest, as in SELECT * FROM employees WHERE employee_id =\n ?
(all engines except Amazon DocumentDB)
\n db.user.id
- The ID of the user logged in to the database (all engines except Amazon DocumentDB).
\n db.user.name
- The name of the user logged in to the database (all engines except Amazon DocumentDB).
\n db.wait_event.name
- The event for which the backend is waiting (all engines except Amazon DocumentDB).
\n db.wait_event.type
- The type of event for which the backend is waiting (all engines except Amazon DocumentDB).
\n db.wait_event_type.name
- The name of the event type for which the backend is waiting (all engines except\n Amazon DocumentDB).
\n db.wait_state.name
- The event for which the backend is waiting (only Amazon DocumentDB).
Creates a new event destination in a configuration set.
\nAn event destination is a location where you send message events. The event options\n are Amazon CloudWatch, Amazon Kinesis Data Firehose, or Amazon SNS. For example,\n when a message is delivered successfully, you can send information about that event to\n an event destination, or send notifications to endpoints that are subscribed to an\n Amazon SNS topic.
\nEach configuration set can contain between 0 and 5 event destinations. Each event\n destination can contain a reference to a single destination, such as a CloudWatch\n or Kinesis Data Firehose destination.
" + "smithy.api#documentation": "Creates a new event destination in a configuration set.
\nAn event destination is a location where you send message events. The event options\n are Amazon CloudWatch, Amazon Data Firehose, or Amazon SNS. For example,\n when a message is delivered successfully, you can send information about that event to\n an event destination, or send notifications to endpoints that are subscribed to an\n Amazon SNS topic.
\nEach configuration set can contain between 0 and 5 event destinations. Each event\n destination can contain a reference to a single destination, such as a CloudWatch\n or Firehose destination.
" } }, "com.amazonaws.pinpointsmsvoicev2#CreateEventDestinationRequest": { @@ -956,7 +956,7 @@ "MatchingEventTypes": { "target": "com.amazonaws.pinpointsmsvoicev2#EventTypeList", "traits": { - "smithy.api#documentation": "An array of event types that determine which events to log. If \"ALL\" is used, then\n Amazon Pinpoint logs every event type.
\nThe TEXT_SENT
event type is not supported.
An array of event types that determine which events to log. If \"ALL\" is used, then\n AWS End User Messaging SMS and Voice logs every event type.
\nThe TEXT_SENT
event type is not supported.
An object that contains information about an event destination for logging to Amazon Kinesis Data Firehose.
" + "smithy.api#documentation": "An object that contains information about an event destination for logging to Amazon Data Firehose.
" } }, "SnsDestination": { @@ -1045,7 +1045,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new opt-out list.
\nIf the opt-out list name already exists, an error is returned.
\nAn opt-out list is a list of phone numbers that are opted out, meaning you can't send\n SMS or voice messages to them. If end user replies with the keyword \"STOP,\" an entry for\n the phone number is added to the opt-out list. In addition to STOP, your recipients can\n use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported\n opt-out keywords, see \n SMS opt out in the Amazon Pinpoint User\n Guide.
" + "smithy.api#documentation": "Creates a new opt-out list.
\nIf the opt-out list name already exists, an error is returned.
\nAn opt-out list is a list of phone numbers that are opted out, meaning you can't send\n SMS or voice messages to them. If end user replies with the keyword \"STOP,\" an entry for\n the phone number is added to the opt-out list. In addition to STOP, your recipients can\n use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported\n opt-out keywords, see \n SMS opt out in the AWS End User Messaging SMS User Guide.
" } }, "com.amazonaws.pinpointsmsvoicev2#CreateOptOutListRequest": { @@ -1149,7 +1149,7 @@ "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#PhoneOrSenderIdOrArn", "traits": { - "smithy.api#documentation": "The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used\n to get the values for SenderId and SenderIdArn.
", + "smithy.api#documentation": "The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used\n to get the values for SenderId and SenderIdArn.
\nAfter the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity.
", "smithy.api#required": {} } }, @@ -1163,7 +1163,7 @@ "MessageType": { "target": "com.amazonaws.pinpointsmsvoicev2#MessageType", "traits": { - "smithy.api#documentation": "The type of message. Valid values are TRANSACTIONAL for messages that are critical or\n time-sensitive and PROMOTIONAL for messages that aren't critical or\n time-sensitive.
", + "smithy.api#documentation": "The type of message. Valid values are TRANSACTIONAL for messages that are critical or\n time-sensitive and PROMOTIONAL for messages that aren't critical or\n time-sensitive. After the pool is created the MessageType can't be changed.
", "smithy.api#required": {} } }, @@ -1241,7 +1241,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" + "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" } }, "OptOutListName": { @@ -1254,7 +1254,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Indicates whether shared routes are enabled for the pool.
" + "smithy.api#documentation": "Indicates whether shared routes are enabled for the pool. Set to false and only origination identities in this pool are used to send messages.
" } }, "DeletionProtectionEnabled": { @@ -2330,7 +2330,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an existing keyword from an origination phone number or pool.
\nA keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, Amazon Pinpoint responds with a customizable\n message.
\nKeywords \"HELP\" and \"STOP\" can't be deleted or modified.
" + "smithy.api#documentation": "Deletes an existing keyword from an origination phone number or pool.
\nA keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable\n message.
\nKeywords \"HELP\" and \"STOP\" can't be deleted or modified.
" } }, "com.amazonaws.pinpointsmsvoicev2#DeleteKeywordRequest": { @@ -2703,7 +2703,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" + "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" } }, "OptOutListName": { @@ -3157,7 +3157,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an account-level monthly spending limit override for sending text messages.\n Deleting a spend limit override will set the EnforcedLimit
to equal the\n MaxLimit
, which is controlled by Amazon Web Services. For more\n information on spend limits (quotas) see Amazon Pinpoint quotas \n in the Amazon Pinpoint Developer Guide.
Deletes an account-level monthly spending limit override for sending text messages.\n Deleting a spend limit override will set the EnforcedLimit
to equal the\n MaxLimit
, which is controlled by Amazon Web Services. For more\n information on spend limits (quotas) see Quotas \n in the AWS End User Messaging SMS User Guide.
Deletes an account level monthly spend limit override for sending voice messages.\n Deleting a spend limit override sets the EnforcedLimit
equal to the\n MaxLimit
, which is controlled by Amazon Web Services. For more\n information on spending limits (quotas) see Amazon Pinpoint quotas\n in the Amazon Pinpoint Developer Guide.
Deletes an account level monthly spend limit override for sending voice messages.\n Deleting a spend limit override sets the EnforcedLimit
equal to the\n MaxLimit
, which is controlled by Amazon Web Services. For more\n information on spending limits (quotas) see Quotas \n in the AWS End User Messaging SMS User Guide.
Describes attributes of your Amazon Web Services account. The supported account\n attributes include account tier, which indicates whether your account is in the sandbox\n or production environment. When you're ready to move your account out of the sandbox,\n create an Amazon Web Services Support case for a service limit increase request.
\nNew Amazon Pinpoint accounts are placed into an SMS or voice sandbox. The sandbox\n protects both Amazon Web Services end recipients and SMS or voice recipients from fraud\n and abuse.
", + "smithy.api#documentation": "Describes attributes of your Amazon Web Services account. The supported account\n attributes include account tier, which indicates whether your account is in the sandbox\n or production environment. When you're ready to move your account out of the sandbox,\n create an Amazon Web Services Support case for a service limit increase request.
\nNew accounts are placed into an SMS or voice sandbox. The sandbox\n protects both Amazon Web Services end recipients and SMS or voice recipients from fraud\n and abuse.
", "smithy.api#paginated": { "items": "AccountAttributes" } @@ -3413,7 +3413,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes the current Amazon Pinpoint SMS Voice V2 resource quotas for your\n account. The description for a quota includes the quota name, current usage toward that\n quota, and the quota's maximum value.
\nWhen you establish an Amazon Web Services account, the account has initial quotas on\n the maximum number of configuration sets, opt-out lists, phone numbers, and pools that\n you can create in a given Region. For more information see \n Amazon Pinpoint quotas in the Amazon Pinpoint Developer\n Guide.
", + "smithy.api#documentation": "Describes the current AWS End User Messaging SMS and Voice SMS Voice V2 resource quotas for your\n account. The description for a quota includes the quota name, current usage toward that\n quota, and the quota's maximum value.
\nWhen you establish an Amazon Web Services account, the account has initial quotas on\n the maximum number of configuration sets, opt-out lists, phone numbers, and pools that\n you can create in a given Region. For more information see Quotas \n in the AWS End User Messaging SMS User Guide.
", "smithy.api#paginated": { "items": "AccountLimits" } @@ -3569,7 +3569,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes the specified keywords or all keywords on your origination phone number or\n pool.
\nA keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, Amazon Pinpoint responds with a customizable\n message.
\nIf you specify a keyword that isn't valid, an error is returned.
", + "smithy.api#documentation": "Describes the specified keywords or all keywords on your origination phone number or\n pool.
\nA keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable\n message.
\nIf you specify a keyword that isn't valid, an error is returned.
", "smithy.api#paginated": { "items": "Keywords" } @@ -4849,7 +4849,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes the current Amazon Pinpoint monthly spend limits for sending voice and\n text messages.
\nWhen you establish an Amazon Web Services account, the account has initial monthly\n spend limit in a given Region. For more information on increasing your monthly spend\n limit, see \n Requesting increases to your monthly SMS spending quota for Amazon Pinpoint\n in the Amazon Pinpoint User Guide.
", + "smithy.api#documentation": "Describes the current monthly spend limits for sending voice and\n text messages.
\nWhen you establish an Amazon Web Services account, the account has initial monthly\n spend limit in a given Region. For more information on increasing your monthly spend\n limit, see \n Requesting increases to your monthly SMS, MMS, or Voice spending quota\n in the AWS End User Messaging SMS User Guide.
", "smithy.api#paginated": { "items": "SpendLimits" } @@ -5355,7 +5355,7 @@ "KinesisFirehoseDestination": { "target": "com.amazonaws.pinpointsmsvoicev2#KinesisFirehoseDestination", "traits": { - "smithy.api#documentation": "An object that contains information about an event destination for logging to Amazon Kinesis Data Firehose.
" + "smithy.api#documentation": "An object that contains information about an event destination for logging to Amazon Data Firehose.
" } }, "SnsDestination": { @@ -5366,7 +5366,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains information about an event destination.
\nEvent destinations are associated with configuration sets, which enable you to publish\n message sending events to CloudWatch, Kinesis Data Firehose, or Amazon SNS.
" + "smithy.api#documentation": "Contains information about an event destination.
\nEvent destinations are associated with configuration sets, which enable you to publish\n message sending events to CloudWatch, Firehose, or Amazon SNS.
" } }, "com.amazonaws.pinpointsmsvoicev2#EventDestinationList": { @@ -5731,7 +5731,7 @@ "CountryRuleSet": { "target": "com.amazonaws.pinpointsmsvoicev2#ProtectConfigurationCountryRuleSet", "traits": { - "smithy.api#documentation": "A map of ProtectConfigurationCountryRuleSetInformation objects that contain the\n details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide.
", + "smithy.api#documentation": "A map of ProtectConfigurationCountryRuleSetInformation objects that contain the\n details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide.
", "smithy.api#required": {} } } @@ -5914,7 +5914,7 @@ "IamRoleArn": { "target": "com.amazonaws.pinpointsmsvoicev2#IamRoleArn", "traits": { - "smithy.api#documentation": "The ARN of an Identity and Access Management role that is able to write\n event data to an Amazon Kinesis Data Firehose destination.
", + "smithy.api#documentation": "The ARN of an Identity and Access Management role that is able to write\n event data to an Amazon Data Firehose destination.
", "smithy.api#required": {} } }, @@ -5927,7 +5927,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains the delivery stream Amazon Resource Name (ARN), and the ARN of the Identity and Access Management (IAM) role associated with a Kinesis Data Firehose event\n destination.
\nEvent destinations, such as Kinesis Data Firehose, are associated with configuration\n sets, which enable you to publish message sending events.
" + "smithy.api#documentation": "Contains the delivery stream Amazon Resource Name (ARN), and the ARN of the Identity and Access Management (IAM) role associated with a Firehose event\n destination.
\nEvent destinations, such as Firehose, are associated with configuration\n sets, which enable you to publish message sending events.
" } }, "com.amazonaws.pinpointsmsvoicev2#LanguageCode": { @@ -6865,7 +6865,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "When set to false an end recipient sends a message that begins with HELP or STOP to\n one of your dedicated numbers, Amazon Pinpoint automatically replies with a\n customizable message and adds the end recipient to the OptOutList. When set to true\n you're responsible for responding to HELP and STOP requests. You're also responsible for\n tracking and honoring opt-out request. For more information see Self-managed opt-outs\n
", + "smithy.api#documentation": "When set to false an end recipient sends a message that begins with HELP or STOP to\n one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a\n customizable message and adds the end recipient to the OptOutList. When set to true\n you're responsible for responding to HELP and STOP requests. You're also responsible for\n tracking and honoring opt-out request. For more information see Self-managed opt-outs\n
", "smithy.api#required": {} } }, @@ -7195,7 +7195,7 @@ "name": "sms-voice" }, "aws.protocols#awsJson1_0": {}, - "smithy.api#documentation": "Welcome to the Amazon Pinpoint SMS and Voice, version 2 API Reference.\n This guide provides information about Amazon Pinpoint SMS and Voice, version 2 API\n resources, including supported HTTP methods, parameters, and schemas.
\nAmazon Pinpoint is an Amazon Web Services service that you can use to engage with\n your recipients across multiple messaging channels. The Amazon Pinpoint SMS and\n Voice, version 2 API provides programmatic access to options that are unique to the SMS\n and voice channels. Amazon Pinpoint SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API.
\nIf you're new to Amazon Pinpoint SMS, it's also helpful to review the \n Amazon Pinpoint SMS User Guide. The Amazon Pinpoint\n Developer Guide provides tutorials, code samples, and procedures that\n demonstrate how to use Amazon Pinpoint SMS features programmatically and how to integrate\n Amazon Pinpoint functionality into mobile apps and other types of applications.\n The guide also provides key information, such as Amazon Pinpoint integration with\n other Amazon Web Services services, and the quotas that apply to use of the\n service.
\n\n Regional availability\n
\nThe Amazon Pinpoint SMS and Voice, version 2 API Reference is\n available in several Amazon Web Services Regions and it provides an endpoint for each of\n these Regions. For a list of all the Regions and endpoints where the API is currently\n available, see Amazon Web Services Service Endpoints and Amazon Pinpoint\n endpoints and quotas in the Amazon Web Services General Reference. To\n learn more about Amazon Web Services Regions, see Managing\n Amazon Web Services Regions in the Amazon Web Services General\n Reference.
\nIn each Region, Amazon Web Services maintains multiple Availability Zones. These\n Availability Zones are physically isolated from each other, but are united by private,\n low-latency, high-throughput, and highly redundant network connections. These\n Availability Zones enable us to provide very high levels of availability and redundancy,\n while also minimizing latency. To learn more about the number of Availability Zones that\n are available in each Region, see Amazon Web Services\n Global Infrastructure.\n
", + "smithy.api#documentation": "Welcome to the AWS End User Messaging SMS and Voice, version 2 API Reference.\n This guide provides information about AWS End User Messaging SMS and Voice, version 2 API\n resources, including supported HTTP methods, parameters, and schemas.
\nAmazon Pinpoint is an Amazon Web Services service that you can use to engage with\n your recipients across multiple messaging channels. The AWS End User Messaging SMS and Voice, version 2 API provides programmatic access to options that are unique to the SMS\n and voice channels. AWS End User Messaging SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API.
\nIf you're new to AWS End User Messaging SMS and Voice, it's also helpful to review the \n AWS End User Messaging SMS User Guide. The AWS End User Messaging SMS User Guide\n provides tutorials, code samples, and procedures that\n demonstrate how to use AWS End User Messaging SMS and Voice features programmatically and how to integrate\n functionality into mobile apps and other types of applications.\n The guide also provides key information, such as AWS End User Messaging SMS and Voice integration with\n other Amazon Web Services services, and the quotas that apply to use of the\n service.
\n\n Regional availability\n
\nThe AWS End User Messaging SMS and Voice version 2 API Reference is\n available in several Amazon Web Services Regions and it provides an endpoint for each of\n these Regions. For a list of all the Regions and endpoints where the API is currently\n available, see Amazon Web Services Service Endpoints and Amazon Pinpoint\n endpoints and quotas in the Amazon Web Services General Reference. To\n learn more about Amazon Web Services Regions, see Managing\n Amazon Web Services Regions in the Amazon Web Services General\n Reference.
\nIn each Region, Amazon Web Services maintains multiple Availability Zones. These\n Availability Zones are physically isolated from each other, but are united by private,\n low-latency, high-throughput, and highly redundant network connections. These\n Availability Zones enable us to provide very high levels of availability and redundancy,\n while also minimizing latency. To learn more about the number of Availability Zones that\n are available in each Region, see Amazon Web Services\n Global Infrastructure.\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -8110,7 +8110,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "When set to false, an end recipient sends a message that begins with HELP or STOP to\n one of your dedicated numbers, Amazon Pinpoint automatically replies with a\n customizable message and adds the end recipient to the OptOutList. When set to true\n you're responsible for responding to HELP and STOP requests. You're also responsible for\n tracking and honoring opt-out requests. For more information see Self-managed opt-outs\n
", + "smithy.api#documentation": "When set to false, an end recipient sends a message that begins with HELP or STOP to\n one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a\n customizable message and adds the end recipient to the OptOutList. When set to true\n you're responsible for responding to HELP and STOP requests. You're also responsible for\n tracking and honoring opt-out requests. For more information see Self-managed opt-outs\n
", "smithy.api#required": {} } }, @@ -8125,7 +8125,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Allows you to enable shared routes on your pool.
\nBy default, this is set to False
. If you set this value to\n True
, your messages are sent using phone numbers or sender IDs\n (depending on the country) that are shared with other Amazon Pinpoint users. In some\n countries, such as the United States, senders aren't allowed to use shared routes and\n must use a dedicated phone number or short code.
Allows you to enable shared routes on your pool.
\nBy default, this is set to False
. If you set this value to\n True
, your messages are sent using phone numbers or sender IDs\n (depending on the country) that are shared with other users. In some\n countries, such as the United States, senders aren't allowed to use shared routes and\n must use a dedicated phone number or short code.
Creates or updates a keyword configuration on an origination phone number or\n pool.
\nA keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, Amazon Pinpoint responds with a customizable\n message.
\nIf you specify a keyword that isn't valid, an error is returned.
" + "smithy.api#documentation": "Creates or updates a keyword configuration on an origination phone number or\n pool.
\nA keyword is a word that you can search for on a particular phone number or pool. It\n is also a specific word or phrase that an end user can send to your number to elicit a\n response, such as an informational message or a special offer. When your number receives\n a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable\n message.
\nIf you specify a keyword that isn't valid, an error is returned.
" } }, "com.amazonaws.pinpointsmsvoicev2#PutKeywordRequest": { @@ -9913,7 +9913,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" + "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" } }, "OptOutListName": { @@ -10082,7 +10082,7 @@ } ], "traits": { - "smithy.api#documentation": "Request an origination phone number for use in your account. For more information on\n phone number request see Requesting a\n number in the Amazon Pinpoint User Guide.
" + "smithy.api#documentation": "Request an origination phone number for use in your account. For more information on\n phone number request see Request a phone number in the AWS End User Messaging SMS User Guide.
" } }, "com.amazonaws.pinpointsmsvoicev2#RequestPhoneNumberRequest": { @@ -10238,7 +10238,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" + "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" } }, "OptOutListName": { @@ -10900,7 +10900,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new text message and sends it to a recipient's phone number.
\nSMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit\n depends on the destination country of your messages, as well as the type of phone number\n (origination number) that you use to send the message. For more information, see Message Parts per\n Second (MPS) limits in the Amazon Pinpoint User\n Guide.
" + "smithy.api#documentation": "Creates a new text message and sends it to a recipient's phone number. SendTextMessage only sends an SMS message to one recipient each time it is invoked.
\nSMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit\n depends on the destination country of your messages, as well as the type of phone number\n (origination number) that you use to send the message. For more information about MPS, see Message Parts per\n Second (MPS) limits in the AWS End User Messaging SMS User Guide.
" } }, "com.amazonaws.pinpointsmsvoicev2#SendTextMessageRequest": { @@ -10946,13 +10946,13 @@ "MaxPrice": { "target": "com.amazonaws.pinpointsmsvoicev2#MaxPrice", "traits": { - "smithy.api#documentation": "The maximum amount that you want to spend, in US dollars, per each text message part.\n A text message can contain multiple parts.
" + "smithy.api#documentation": "The maximum amount that you want to spend, in US dollars, per each text message. If the calculated amount to send the text message is greater than MaxPrice
, the message is not sent and an error is returned.
How long the text message is valid for. By default this is 72 hours.
" + "smithy.api#documentation": "How long the text message is valid for, in seconds. By default this is 72 hours. If the messages isn't handed off before the TTL expires we stop attempting to hand off the message and return TTL_EXPIRED
event.
This field is used for any country-specific registration requirements. Currently, this\n setting is only used when you send messages to recipients in India using a sender ID.\n For more information see Special requirements for sending SMS messages to recipients in India.\n
" + "smithy.api#documentation": "This field is used for any country-specific registration requirements. Currently, this\n setting is only used when you send messages to recipients in India using a sender ID.\n For more information see Special requirements for sending SMS messages to recipients in India.\n
\n\n IN_ENTITY_ID
The entity ID or Principal\n Entity (PE) ID that you received after completing the sender ID\n registration process.
\n IN_TEMPLATE_ID
The template ID that you\n received after completing the sender ID registration\n process.
Make sure that the Template ID that you specify matches\n your message template exactly. If your message doesn't match\n the template that you provided during the registration\n process, the mobile carriers might reject your\n message.
\nWhen set to true, the message is checked and validated, but isn't sent to the end\n recipient.
" + "smithy.api#documentation": "When set to true, the message is checked and validated, but isn't sent to the end\n recipient. You are not charged for using DryRun
.
The Message Parts per Second (MPS) limit when using DryRun
is five. If\n your origination identity has a lower MPS limit, then the lower MPS limit is used. For\n more information about MPS limits, see Message Parts per\n Second (MPS) limits in the AWS End User Messaging SMS User Guide.
Allows you to send a request that sends a voice message through Amazon Pinpoint.\n This operation uses Amazon Polly to\n convert a text script into a voice message.
" + "smithy.api#documentation": "Allows you to send a request that sends a voice message.\n This operation uses Amazon Polly to\n convert a text script into a voice message.
" } }, "com.amazonaws.pinpointsmsvoicev2#SendVoiceMessageRequest": { @@ -11154,7 +11154,7 @@ } }, "traits": { - "smithy.api#documentation": "The alphanumeric sender ID in a specific country that you want to describe. For more\n information on sender IDs see Requesting\n sender IDs for SMS messaging with Amazon Pinpoint\n in the Amazon Pinpoint User Guide.
" + "smithy.api#documentation": "The alphanumeric sender ID in a specific country that you want to describe. For more\n information on sender IDs see Requesting\n sender IDs in the AWS End User Messaging SMS User Guide.
" } }, "com.amazonaws.pinpointsmsvoicev2#SenderIdFilter": { @@ -11872,7 +11872,7 @@ } }, "traits": { - "smithy.api#documentation": "Describes the current Amazon Pinpoint monthly spend limits for sending voice and\n text messages. For more information on increasing your monthly spend limit, see \n Requesting increases to your monthly SMS spending quota for Amazon Pinpoint\n in the Amazon Pinpoint User Guide.
" + "smithy.api#documentation": "Describes the current monthly spend limits for sending voice and\n text messages. For more information on increasing your monthly spend limit, see \n Requesting a spending quota increase\n in the AWS End User Messaging SMS User Guide.
" } }, "com.amazonaws.pinpointsmsvoicev2#SpendLimitList": { @@ -12130,7 +12130,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds or overwrites only the specified tags for the specified Amazon Pinpoint SMS\n Voice, version 2 resource. When you specify an existing tag key, the value is\n overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag\n consists of a key and an optional value. Tag keys must be unique per resource. For more\n information about tags, see Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer\n Guide.
" + "smithy.api#documentation": "Adds or overwrites only the specified tags for the specified resource. When you specify an existing tag key, the value is\n overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag\n consists of a key and an optional value. Tag keys must be unique per resource. For more\n information about tags, see Tags in the AWS End User Messaging SMS User Guide.
" } }, "com.amazonaws.pinpointsmsvoicev2#TagResourceRequest": { @@ -12289,7 +12289,7 @@ } ], "traits": { - "smithy.api#documentation": "Removes the association of the specified tags from an Amazon Pinpoint SMS Voice V2\n resource. For more information on tags see Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer\n Guide.
" + "smithy.api#documentation": "Removes the association of the specified tags from a\n resource. For more information on tags see Tags in the AWS End User Messaging SMS User Guide.
" } }, "com.amazonaws.pinpointsmsvoicev2#UntagResourceRequest": { @@ -12350,7 +12350,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an existing event destination in a configuration set. You can update the\n IAM role ARN for CloudWatch Logs and Kinesis Data Firehose. You can\n also enable or disable the event destination.
\nYou may want to update an event destination to change its matching event types or\n updating the destination resource ARN. You can't change an event destination's type\n between CloudWatch Logs, Kinesis Data Firehose, and Amazon SNS.
" + "smithy.api#documentation": "Updates an existing event destination in a configuration set. You can update the\n IAM role ARN for CloudWatch Logs and Firehose. You can\n also enable or disable the event destination.
\nYou may want to update an event destination to change its matching event types or\n updating the destination resource ARN. You can't change an event destination's type\n between CloudWatch Logs, Firehose, and Amazon SNS.
" } }, "com.amazonaws.pinpointsmsvoicev2#UpdateEventDestinationRequest": { @@ -12391,7 +12391,7 @@ "KinesisFirehoseDestination": { "target": "com.amazonaws.pinpointsmsvoicev2#KinesisFirehoseDestination", "traits": { - "smithy.api#documentation": "An object that contains information about an event destination for logging to Kinesis Data Firehose.
" + "smithy.api#documentation": "An object that contains information about an event destination for logging to Firehose.
" } }, "SnsDestination": { @@ -12494,7 +12494,7 @@ "SelfManagedOptOutsEnabled": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" + "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" } }, "OptOutListName": { @@ -12690,7 +12690,7 @@ "SelfManagedOptOutsEnabled": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" + "smithy.api#documentation": "By default this is set to false. When an end recipient sends a message that begins\n with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically\n replies with a customizable message and adds the end recipient to the OptOutList. When\n set to true you're responsible for responding to HELP and STOP requests. You're also\n responsible for tracking and honoring opt-out requests.
" } }, "OptOutListName": { @@ -12766,7 +12766,7 @@ "target": "smithy.api#PrimitiveBoolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "When an end recipient sends a message that begins with HELP or STOP to one of your\n dedicated numbers, Amazon Pinpoint automatically replies with a customizable message\n and adds the end recipient to the OptOutList. When set to true you're responsible for\n responding to HELP and STOP requests. You're also responsible for tracking and honoring\n opt-out requests.
" + "smithy.api#documentation": "When an end recipient sends a message that begins with HELP or STOP to one of your\n dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message\n and adds the end recipient to the OptOutList. When set to true you're responsible for\n responding to HELP and STOP requests. You're also responsible for tracking and honoring\n opt-out requests.
" } }, "OptOutListName": { @@ -12878,7 +12878,7 @@ "CountryRuleSetUpdates": { "target": "com.amazonaws.pinpointsmsvoicev2#ProtectConfigurationCountryRuleSet", "traits": { - "smithy.api#documentation": "A map of ProtectConfigurationCountryRuleSetInformation objects that contain the\n details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide.
", + "smithy.api#documentation": "A map of ProtectConfigurationCountryRuleSetInformation objects that contain the\n details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide.
", "smithy.api#required": {} } } diff --git a/models/quicksight.json b/models/quicksight.json index f169298e86..acb0b1f6ab 100644 --- a/models/quicksight.json +++ b/models/quicksight.json @@ -8237,6 +8237,9 @@ { "target": "com.amazonaws.quicksight#ConflictException" }, + { + "target": "com.amazonaws.quicksight#CustomerManagedKeyUnavailableException" + }, { "target": "com.amazonaws.quicksight#InternalFailureException" }, @@ -10995,6 +10998,25 @@ "smithy.api#documentation": "The configuration of custom values for the destination parameter in DestinationParameterValueConfiguration
.
The Amazon Web Services request ID for this operation.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The customer managed key that is registered to your Amazon QuickSight account is unavailable.
", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.quicksight#Dashboard": { "type": "structure", "members": { @@ -52022,6 +52044,9 @@ { "target": "com.amazonaws.quicksight#ConflictException" }, + { + "target": "com.amazonaws.quicksight#CustomerManagedKeyUnavailableException" + }, { "target": "com.amazonaws.quicksight#InternalFailureException" }, diff --git a/models/rds.json b/models/rds.json index 902fc849c7..15ce53cad1 100644 --- a/models/rds.json +++ b/models/rds.json @@ -6134,6 +6134,12 @@ "smithy.api#required": {} } }, + "MinACU": { + "target": "com.amazonaws.rds#DoubleOptional", + "traits": { + "smithy.api#documentation": "The minimum capacity of the DB shard group in Aurora capacity units (ACUs).
" + } + }, "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { @@ -11096,6 +11102,12 @@ "smithy.api#documentation": "The maximum capacity of the DB shard group in Aurora capacity units (ACUs).
" } }, + "MinACU": { + "target": "com.amazonaws.rds#DoubleOptional", + "traits": { + "smithy.api#documentation": "The minimum capacity of the DB shard group in Aurora capacity units (ACUs).
" + } + }, "ComputeRedundancy": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { @@ -12137,19 +12149,19 @@ "SkipFinalSnapshot": { "target": "com.amazonaws.rds#Boolean", "traits": { - "smithy.api#documentation": "Specifies whether to skip the creation of a final DB cluster snapshot before the DB cluster is deleted.\n If skip is specified, no DB cluster snapshot is created. If skip isn't specified, a DB cluster snapshot \n is created before the DB cluster is deleted. By default, skip isn't specified, and the DB cluster snapshot is created. \n By default, this parameter is disabled.
\nYou must specify a FinalDBSnapshotIdentifier
parameter if SkipFinalSnapshot
is disabled.
Specifies whether to skip the creation of a final DB cluster snapshot before RDS\n deletes the DB cluster. If you set this value to true
, RDS doesn't create a\n final DB cluster snapshot. If you set this value to false
or don't specify\n it, RDS creates a DB cluster snapshot before it deletes the DB cluster. By default, this\n parameter is disabled, so RDS creates a final DB cluster snapshot.
If SkipFinalSnapshot
is disabled, you must specify a value for the\n FinalDBSnapshotIdentifier
parameter.
The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot
\n is disabled.
Specifying this parameter and also skipping the creation of a final DB cluster snapshot \n with the SkipFinalShapshot
parameter results in an error.
Constraints:
\nMust be 1 to 255 letters, numbers, or hyphens.
\nFirst character must be a letter
\nCan't end with a hyphen or contain two consecutive hyphens
\nThe DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot
\n is disabled.
If you specify this parameter and also skip the creation of a final DB cluster\n snapshot with the SkipFinalSnapshot
parameter, the request results in\n an error.
Constraints:
\nMust be 1 to 255 letters, numbers, or hyphens.
\nFirst character must be a letter
\nCan't end with a hyphen or contain two consecutive hyphens
\nSpecifies whether to remove automated backups immediately after the DB\n cluster is deleted. This parameter isn't case-sensitive. The default is to remove \n automated backups immediately after the DB cluster is deleted.\n
\nYou must delete automated backups for Amazon RDS Multi-AZ DB clusters. For more information about managing automated backups for RDS Multi-AZ DB clusters, see Managing automated backups.
\nSpecifies whether to remove automated backups immediately after the DB\n cluster is deleted. This parameter isn't case-sensitive. The default is to remove \n automated backups immediately after the DB cluster is deleted.
" } } }, @@ -13909,7 +13921,7 @@ "Source": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "A specific source to return parameters for.
\nValid Values:
\n\n user
\n
\n engine
\n
\n service
\n
A specific source to return parameters for.
\nValid Values:
\n\n customer
\n
\n engine
\n
\n service
\n
The maximum capacity of the DB shard group in Aurora capacity units (ACUs).
" } + }, + "MinACU": { + "target": "com.amazonaws.rds#DoubleOptional", + "traits": { + "smithy.api#documentation": "The minimum capacity of the DB shard group in Aurora capacity units (ACUs).
" + } } }, "traits": { @@ -23780,13 +23798,13 @@ "DBSecurityGroupMemberships": { "target": "com.amazonaws.rds#DBSecurityGroupNameList", "traits": { - "smithy.api#documentation": "A list of DBSecurityGroupMembership name strings used for this option.
" + "smithy.api#documentation": "A list of DB security groups used for this option.
" } }, "VpcSecurityGroupMemberships": { "target": "com.amazonaws.rds#VpcSecurityGroupIdList", "traits": { - "smithy.api#documentation": "A list of VpcSecurityGroupMembership name strings used for this option.
" + "smithy.api#documentation": "A list of VPC security group names used for this option.
" } }, "OptionSettings": { @@ -23797,7 +23815,7 @@ } }, "traits": { - "smithy.api#documentation": "A list of all available options
" + "smithy.api#documentation": "A list of all available options for an option group.
" } }, "com.amazonaws.rds#OptionConfigurationList": { @@ -30782,7 +30800,7 @@ } }, "traits": { - "smithy.api#documentation": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.
\nFor more information, see\n Tagging Amazon RDS Resources in the Amazon RDS User Guide\n or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide.\n
" + "smithy.api#documentation": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.
\nFor more information, see\n Tagging Amazon RDS resources in the Amazon RDS User Guide or \n Tagging Amazon Aurora and Amazon RDS resources in the Amazon Aurora User Guide.\n
" } }, "com.amazonaws.rds#TagList": { @@ -30794,7 +30812,7 @@ } }, "traits": { - "smithy.api#documentation": "A list of tags.\n For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.\n
" + "smithy.api#documentation": "A list of tags.
\nFor more information, see\n Tagging Amazon RDS resources in the Amazon RDS User Guide or \n Tagging Amazon Aurora and Amazon RDS resources in the Amazon Aurora User Guide.\n
" } }, "com.amazonaws.rds#TagListMessage": { diff --git a/models/redshift-serverless.json b/models/redshift-serverless.json index bf66f50b8c..3cdf696223 100644 --- a/models/redshift-serverless.json +++ b/models/redshift-serverless.json @@ -550,7 +550,7 @@ "roleArn": { "target": "com.amazonaws.redshiftserverless#IamRoleArn", "traits": { - "smithy.api#documentation": "The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Cluster Management Guide
", + "smithy.api#documentation": "The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Management Guide
", "smithy.api#required": {} } }, @@ -904,6 +904,9 @@ { "target": "com.amazonaws.redshiftserverless#InternalServerException" }, + { + "target": "com.amazonaws.redshiftserverless#Ipv6CidrBlockNotFoundException" + }, { "target": "com.amazonaws.redshiftserverless#ResourceNotFoundException" }, @@ -989,6 +992,12 @@ "traits": { "smithy.api#documentation": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.
" } + }, + "ipAddressType": { + "target": "com.amazonaws.redshiftserverless#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address type that the workgroup supports. Possible values are ipv4
and dualstack
.
There are no subnets in your VPC with associated IPv6 CIDR blocks. To use dual-stack mode, associate an IPv6 CIDR block with each subnet in your VPC.
", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.redshiftserverless#KmsKeyId": { "type": "string" }, @@ -3589,6 +3620,12 @@ "traits": { "smithy.api#documentation": "The availability Zone.
" } + }, + "ipv6Address": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The IPv6 address of the network interface within the subnet.
" + } } }, "traits": { @@ -5166,7 +5203,7 @@ "roleArn": { "target": "com.amazonaws.redshiftserverless#IamRoleArn", "traits": { - "smithy.api#documentation": "The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Cluster Management Guide
" + "smithy.api#documentation": "The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Management Guide
" } }, "state": { @@ -6195,7 +6232,7 @@ "roleArn": { "target": "com.amazonaws.redshiftserverless#IamRoleArn", "traits": { - "smithy.api#documentation": "The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Cluster Management Guide
" + "smithy.api#documentation": "The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. \n This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. \n For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for \n Amazon Redshift in the Amazon Redshift Management Guide
" } }, "enabled": { @@ -6452,6 +6489,9 @@ { "target": "com.amazonaws.redshiftserverless#InternalServerException" }, + { + "target": "com.amazonaws.redshiftserverless#Ipv6CidrBlockNotFoundException" + }, { "target": "com.amazonaws.redshiftserverless#ResourceNotFoundException" }, @@ -6520,6 +6560,12 @@ "traits": { "smithy.api#documentation": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.
" } + }, + "ipAddressType": { + "target": "com.amazonaws.redshiftserverless#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address type that the workgroup supports. Possible values are ipv4
and dualstack
.
A list of VPCs. Each entry is the unique identifier of a virtual private cloud with access to Amazon Redshift Serverless. If all of the VPCs for the grantee are allowed, it shows an asterisk.
" } + }, + "ipAddressType": { + "target": "com.amazonaws.redshiftserverless#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address type that the workgroup supports. Possible values are ipv4
and dualstack
.
Indicates the identifier of the grouping recommendation.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Indicates the grouping recommendation you have accepted to include in your application.
" + } + }, + "com.amazonaws.resiliencehub#AcceptResourceGroupingRecommendations": { + "type": "operation", + "input": { + "target": "com.amazonaws.resiliencehub#AcceptResourceGroupingRecommendationsRequest" + }, + "output": { + "target": "com.amazonaws.resiliencehub#AcceptResourceGroupingRecommendationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.resiliencehub#AccessDeniedException" + }, + { + "target": "com.amazonaws.resiliencehub#InternalServerException" + }, + { + "target": "com.amazonaws.resiliencehub#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.resiliencehub#ThrottlingException" + }, + { + "target": "com.amazonaws.resiliencehub#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Accepts the resource grouping recommendations suggested by Resilience Hub for your application.
", + "smithy.api#http": { + "method": "POST", + "uri": "/accept-resource-grouping-recommendations", + "code": 200 + } + } + }, + "com.amazonaws.resiliencehub#AcceptResourceGroupingRecommendationsRequest": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition
:resiliencehub:region
:account
:app/app-id
. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.
Indicates the list of resource grouping recommendations you want to include in your application.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resiliencehub#AcceptResourceGroupingRecommendationsResponse": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition
:resiliencehub:region
:account
:app/app-id
. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.
Indicates the list of resource grouping recommendations that could not be included in your application.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resiliencehub#AccessDeniedException": { "type": "structure", "members": { @@ -35,6 +140,9 @@ { "target": "com.amazonaws.resiliencehub#ResourceNotFoundException" }, + { + "target": "com.amazonaws.resiliencehub#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.resiliencehub#ThrottlingException" }, @@ -43,7 +151,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds the source of resource-maps to the draft version of an application. During assessment, Resilience Hub will use these resource-maps to resolve the latest physical ID for each resource in the application template. For more information about different types of resources suported by Resilience Hub and how to add them in your application, see Step 2: How is your application managed? in the Resilience Hub User Guide.
", + "smithy.api#documentation": "Adds the source of resource-maps to the draft version of an application. During\n assessment, Resilience Hub will use these resource-maps to resolve the latest physical\n ID for each resource in the application template. For more information about different types\n of resources supported by Resilience Hub and how to add them in your application, see\n Step\n 2: How is your application managed? in the Resilience Hub User Guide.
", "smithy.api#http": { "method": "POST", "uri": "/add-draft-app-version-resource-mappings", @@ -470,6 +578,12 @@ "traits": { "smithy.api#documentation": "Indicates if compliance drifts (deviations) were detected while running an assessment for\n your application.
" } + }, + "summary": { + "target": "com.amazonaws.resiliencehub#AssessmentSummary", + "traits": { + "smithy.api#documentation": "Indicates a concise summary that provides an overview of the Resilience Hub assessment.
" + } } }, "traits": { @@ -553,7 +667,7 @@ "complianceStatus": { "target": "com.amazonaws.resiliencehub#ComplianceStatus", "traits": { - "smithy.api#documentation": "TCurrent status of compliance for the resiliency policy.
" + "smithy.api#documentation": "Current\n status of compliance for the resiliency policy.
" } }, "cost": { @@ -611,6 +725,14 @@ { "value": "ChangesDetected", "name": "CHANGES_DETECTED" + }, + { + "value": "NotApplicable", + "name": "NOT_APPLICABLE" + }, + { + "value": "MissingPolicy", + "name": "MISSING_POLICY" } ] } @@ -619,7 +741,7 @@ "type": "structure", "members": { "name": { - "target": "com.amazonaws.resiliencehub#String255", + "target": "com.amazonaws.resiliencehub#EntityName255", "traits": { "smithy.api#documentation": "Name of the Application Component.
", "smithy.api#required": {} @@ -633,7 +755,7 @@ } }, "id": { - "target": "com.amazonaws.resiliencehub#String255", + "target": "com.amazonaws.resiliencehub#EntityName255", "traits": { "smithy.api#documentation": "Identifier of the Application Component.
" } @@ -966,6 +1088,38 @@ ] } }, + "com.amazonaws.resiliencehub#AssessmentRiskRecommendation": { + "type": "structure", + "members": { + "risk": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "Indicates the description of the potential risk identified in the application as part of the Resilience Hub assessment.
\nThis property is available only in the US East (N. Virginia) Region.
\nIndicates the recommendation provided by the Resilience Hub to address the identified\n risks in the application.
\nThis property is available only in the US East (N. Virginia) Region.
\nIndicates the Application Components (AppComponents) that were assessed as part of the\n assessnent and are associated with the identified risk and recommendation.
\nThis property is available only in the US East (N. Virginia) Region.
\nIndicates a specific risk identified in the Resilience Hub assessment and the corresponding recommendation provided to address that risk.
\nThe assessment summary generated by large language models (LLMs) on Amazon Bedrock are only suggestions. \n The current level of generative AI technology is not perfect and LLMs are not infallible. \n Bias and incorrect answers, although rare, should be expected. Review each recommendation in the assessment summary before you use the output from an LLM.\n
\nThis property is available only in the US East (N. Virginia) Region.
\nIndicates a concise summary that provides an overview of the Resilience Hub assessment.
\nThis property is available only in the US East (N. Virginia) Region.
\nIndicates the top risks and recommendations identified by the Resilience Hub assessment, \n each representing a specific risk and the corresponding recommendation to address it.
\nThis property is available only in the US East (N. Virginia) Region.
\nIndicates the AI-generated summary for the Resilience Hub assessment, providing a concise overview that highlights the top risks and recommendations.
\nThis property is available only in the US East (N. Virginia) Region.
\nDefines the list of operational recommendations that need to be included or excluded.
", + "smithy.api#documentation": "Defines the list of operational recommendations that need to be included or\n excluded.
", "smithy.api#required": {} } } @@ -2176,7 +2365,7 @@ "failedEntries": { "target": "com.amazonaws.resiliencehub#BatchUpdateRecommendationStatusFailedEntries", "traits": { - "smithy.api#documentation": "A list of items with error details about each item, which could not be included or excluded.
", + "smithy.api#documentation": "A list of items with error details about each item, which could not be included or\n excluded.
", "smithy.api#required": {} } } @@ -2306,7 +2495,7 @@ "diffType": { "target": "com.amazonaws.resiliencehub#DifferenceType", "traits": { - "smithy.api#documentation": "Difference type between actual and expected recovery point objective (RPO) and recovery\n time objective (RTO) values. Currently, Resilience Hub supports only NotEqual difference type.
" + "smithy.api#documentation": "Difference type between actual and expected recovery point objective (RPO) and recovery\n time objective (RTO) values. Currently, Resilience Hub supports only\n NotEqual
difference type.
Creates an Resilience Hub application. An Resilience Hub application is a\n collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe a Resilience Hub application,\n you provide an application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate\n resiliency policy. In addition, you can also add resources that are located on Amazon Elastic Kubernetes Service (Amazon EKS) clusters as optional resources. For more information about the number of resources supported per application, see Service\n quotas.
\nAfter you create an Resilience Hub application, you publish it so that you can run a resiliency\n assessment on it. You can then use recommendations from the assessment to improve resiliency\n by running another assessment, comparing results, and then iterating the process until you\n achieve your goals for recovery time objective (RTO) and recovery point objective\n (RPO).
", + "smithy.api#documentation": "Creates an Resilience Hub application. An Resilience Hub application is a\n collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe a Resilience Hub application, you provide an\n application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate\n resiliency policy. In addition, you can also add resources that are located on Amazon Elastic Kubernetes Service (Amazon EKS) clusters as optional resources. For more information\n about the number of resources supported per application, see Service\n quotas.
\nAfter you create an Resilience Hub application, you publish it so that you can run\n a resiliency assessment on it. You can then use recommendations from the assessment to improve\n resiliency by running another assessment, comparing results, and then iterating the process\n until you achieve your goals for recovery time objective (RTO) and recovery point objective\n (RPO).
", "smithy.api#http": { "method": "POST", "uri": "/create-app", @@ -2640,7 +2837,7 @@ "assessmentSchedule": { "target": "com.amazonaws.resiliencehub#AppAssessmentScheduleType", "traits": { - "smithy.api#documentation": "\n Assessment execution schedule with 'Daily' or 'Disabled' values.\n
" + "smithy.api#documentation": "Assessment execution schedule with 'Daily' or 'Disabled' values.
" } }, "permissionModel": { @@ -2652,7 +2849,7 @@ "eventSubscriptions": { "target": "com.amazonaws.resiliencehub#EventSubscriptionList", "traits": { - "smithy.api#documentation": "The list of events you would like to subscribe and get notification for. Currently, Resilience Hub supports only Drift detected and Scheduled assessment failure events notification.
" + "smithy.api#documentation": "The list of events you would like to subscribe and get notification for. Currently,\n Resilience Hub supports only Drift detected and\n Scheduled assessment failure events notification.
" } } } @@ -2701,7 +2898,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new Application Component in the Resilience Hub application.
\nThis API updates the Resilience Hub application draft version. To use this Application Component for running assessments, you must publish the Resilience Hub application using the PublishAppVersion
API.
Creates a new Application Component in the Resilience Hub application.
\nThis API updates the Resilience Hub application draft version. To use this\n Application Component for running assessments, you must publish the Resilience Hub\n application using the PublishAppVersion
API.
Adds a resource to the Resilience Hub application and assigns it to the specified\n Application Components. If you specify a new Application Component, Resilience Hub will automatically\n create the Application Component.
\nThis action has no effect outside Resilience Hub.
\nThis API updates the Resilience Hub application draft version. To use this resource\n for running resiliency assessments, you must publish the Resilience Hub application using\n the PublishAppVersion
API.
To update application version with new physicalResourceID
, you must\n call ResolveAppVersionResources
API.
Adds a resource to the Resilience Hub application and assigns it to the specified\n Application Components. If you specify a new Application Component, Resilience Hub will\n automatically create the Application Component.
\nThis action has no effect outside Resilience Hub.
\nThis API updates the Resilience Hub application draft version. To use this\n resource for running resiliency assessments, you must publish the Resilience Hub\n application using the PublishAppVersion
API.
To update application version with new physicalResourceID
, you must\n call ResolveAppVersionResources
API.
Creates a resiliency policy for an application.
\nResilience Hub allows you to provide a value of zero for rtoInSecs
and\n rpoInSecs
of your resiliency policy. But, while assessing your application, the lowest possible assessment result is near zero. Hence, if you provide value\n zero for rtoInSecs
and rpoInSecs
, the estimated workload RTO and estimated workload RPO result will be near zero and the Compliance\n status for your application will be set to Policy\n breached.
Creates a resiliency policy for an application.
\nResilience Hub allows you to provide a value of zero for rtoInSecs
\n and rpoInSecs
of your resiliency policy. But, while assessing your application,\n the lowest possible assessment result is near zero. Hence, if you provide value zero for\n rtoInSecs
and rpoInSecs
, the estimated workload RTO and\n estimated workload RPO result will be near zero and the Compliance\n status for your application will be set to Policy\n breached.
Deletes an Resilience Hub application assessment. This is a destructive action that can't\n be undone.
", + "smithy.api#documentation": "Deletes an Resilience Hub application assessment. This is a destructive action\n that can't be undone.
", "smithy.api#http": { "method": "POST", "uri": "/delete-app-assessment", @@ -3294,7 +3491,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes the input source and all of its imported resources from the Resilience Hub application.
", + "smithy.api#documentation": "Deletes the input source and all of its imported resources from the Resilience Hub\n application.
", "smithy.api#http": { "method": "POST", "uri": "/delete-app-input-source", @@ -3315,13 +3512,13 @@ "sourceArn": { "target": "com.amazonaws.resiliencehub#Arn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the imported resource you want to remove from the\n Resilience Hub application. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the imported resource you want to remove from the\n Resilience Hub application. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.
" } }, "terraformSource": { "target": "com.amazonaws.resiliencehub#TerraformSource", "traits": { - "smithy.api#documentation": "The imported Terraform s3 state file you want to remove from the Resilience Hub application.
" + "smithy.api#documentation": "The imported Terraform s3 state file you want to remove from the Resilience Hub\n application.
" } }, "clientToken": { @@ -3334,7 +3531,7 @@ "eksSourceClusterNamespace": { "target": "com.amazonaws.resiliencehub#EksSourceClusterNamespace", "traits": { - "smithy.api#documentation": "The namespace on your Amazon Elastic Kubernetes Service cluster that you want to delete from the Resilience Hub application.
" + "smithy.api#documentation": "The namespace on your Amazon Elastic Kubernetes Service cluster that you want to delete from the\n Resilience Hub application.
" } } } @@ -3422,7 +3619,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an Application Component from the Resilience Hub application.
\nThis API updates the Resilience Hub application draft version. To use this Application Component for running assessments, you must publish the Resilience Hub application using the PublishAppVersion
API.
You will not be able to delete an Application Component if it has resources associated with it.
\nDeletes an Application Component from the Resilience Hub application.
\nThis API updates the Resilience Hub application draft version. To use this\n Application Component for running assessments, you must publish the Resilience Hub\n application using the PublishAppVersion
API.
You will not be able to delete an Application Component if it has resources associated\n with it.
\nDeletes a resource from the Resilience Hub application.
\nYou can only delete a manually added resource. To exclude non-manually added resources, use the UpdateAppVersionResource
API.
This action has no effect outside Resilience Hub.
\nThis API updates the Resilience Hub application draft version. To use this resource for running resiliency assessments, you must publish the Resilience Hub application using the PublishAppVersion
API.
Deletes a resource from the Resilience Hub application.
\nYou can only delete a manually added resource. To exclude non-manually added\n resources, use the UpdateAppVersionResource
API.
This action has no effect outside Resilience Hub.
\nThis API updates the Resilience Hub application draft version. To use this\n resource for running resiliency assessments, you must publish the Resilience Hub\n application using the PublishAppVersion
API.
The assessment for an Resilience Hub application, returned as an object. This object\n includes Amazon Resource Names (ARNs), compliance information, compliance status, cost,\n messages, resiliency scores, and more.
", + "smithy.api#documentation": "The assessment for an Resilience Hub application, returned as an object. This\n object includes Amazon Resource Names (ARNs), compliance information, compliance status, cost,\n messages, resiliency scores, and more.
", "smithy.api#required": {} } } @@ -4018,7 +4215,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes a resource of the Resilience Hub application.
\nThis API accepts only one of the following parameters to descibe the resource:
\n\n resourceName
\n
\n logicalResourceId
\n
\n physicalResourceId
(Along with physicalResourceId
, you can also\n provide awsAccountId
, and awsRegion
)
Describes a resource of the Resilience Hub application.
\nThis API accepts only one of the following parameters to describe the resource:
\n\n resourceName
\n
\n logicalResourceId
\n
\n physicalResourceId
(Along with physicalResourceId
, you can\n also provide awsAccountId
, and awsRegion
)
A JSON string that provides information about your application structure. To learn more\n about the appTemplateBody
template, see the sample template provided in the\n Examples section.
The appTemplateBody
JSON string has the following structure:
\n \n resources
\n \n
The list of logical resources that must be included in the Resilience Hub\n application.
\nType: Array
\nDon't add the resources that you want to exclude.
\nEach resources
array item includes the following fields:
\n \n logicalResourceId
\n \n
Logical identifier of the resource.
\nType: Object
\nEach logicalResourceId
object includes the following fields:
\n identifier
\n
Identifier of the resource.
\nType: String
\n\n logicalStackName
\n
The name of the CloudFormation stack this resource belongs to.
\nType: String
\n\n resourceGroupName
\n
The name of the resource group this resource belongs to.
\nType: String
\n\n terraformSourceName
\n
The name of the Terraform S3 state file this resource belongs to.
\nType: String
\n\n eksSourceName
\n
Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
\nThis parameter accepts values in \"eks-cluster/namespace\" format.
\nType: String
\n\n \n type
\n \n
The type of resource.
\nType: string
\n\n \n name
\n \n
The name of the resource.
\nType: String
\n\n additionalInfo
\n
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo
through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
\nKey: \"failover-regions\"
\n
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
\n
\n \n appComponents
\n \n
List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.
\nType: Array
\nEach appComponents
array item includes the following fields:
\n name
\n
Name of the Application Component.
\nType: String
\n\n type
\n
Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.
\nType: String
\n\n resourceNames
\n
The list of included resources that are assigned to the Application Component.
\nType: Array of strings
\n\n additionalInfo
\n
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo
through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
\nKey: \"failover-regions\"
\n
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
\n
\n \n excludedResources
\n \n
The list of logical resource identifiers to be excluded from the application.
\nType: Array
\nDon't add the resources that you want to include.
\nEach excludedResources
array item includes the following fields:
\n \n logicalResourceIds
\n \n
Logical identifier of the resource.
\nType: Object
\nYou can configure only one of the following fields:
\n\n logicalStackName
\n
\n resourceGroupName
\n
\n terraformSourceName
\n
\n eksSourceName
\n
Each logicalResourceIds
object includes the following fields:
\n identifier
\n
Identifier of the resource.
\nType: String
\n\n logicalStackName
\n
The name of the CloudFormation stack this resource belongs to.
\nType: String
\n\n resourceGroupName
\n
The name of the resource group this resource belongs to.
\nType: String
\n\n terraformSourceName
\n
The name of the Terraform S3 state file this resource belongs to.
\nType: String
\n\n eksSourceName
\n
Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
\nThis parameter accepts values in \"eks-cluster/namespace\" format.
\nType: String
\n\n \n version
\n \n
Resilience Hub application version.
\n\n additionalInfo
\n
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo
through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
\nKey: \"failover-regions\"
\n
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
\n
A JSON string that provides information about your application structure. To learn more\n about the appTemplateBody
template, see the sample template provided in the\n Examples section.
The appTemplateBody
JSON string has the following structure:
\n \n resources
\n \n
The list of logical resources that must be included in the Resilience Hub\n application.
\nType: Array
\nDon't add the resources that you want to exclude.
\nEach resources
array item includes the following fields:
\n \n logicalResourceId
\n \n
Logical identifier of the resource.
\nType: Object
\nEach logicalResourceId
object includes the following fields:
\n identifier
\n
Identifier of the resource.
\nType: String
\n\n logicalStackName
\n
The name of the CloudFormation stack this resource belongs to.
\nType: String
\n\n resourceGroupName
\n
The name of the resource group this resource belongs to.
\nType: String
\n\n terraformSourceName
\n
The name of the Terraform S3 state file this resource belongs to.
\nType: String
\n\n eksSourceName
\n
Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
\nThis parameter accepts values in \"eks-cluster/namespace\" format.
\nType: String
\n\n \n type
\n \n
The type of resource.
\nType: string
\n\n \n name
\n \n
The name of the resource.
\nType: String
\n\n additionalInfo
\n
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo
through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
\nKey: \"failover-regions\"
\n
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
\n
\n \n appComponents
\n \n
List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.
\nType: Array
\nEach appComponents
array item includes the following fields:
\n name
\n
Name of the Application Component.
\nType: String
\n\n type
\n
Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.
\nType: String
\n\n resourceNames
\n
The list of included resources that are assigned to the Application Component.
\nType: Array of strings
\n\n additionalInfo
\n
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo
through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
\nKey: \"failover-regions\"
\n
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
\n
\n \n excludedResources
\n \n
The list of logical resource identifiers to be excluded from the application.
\nType: Array
\nDon't add the resources that you want to include.
\nEach excludedResources
array item includes the following fields:
\n \n logicalResourceIds
\n \n
Logical identifier of the resource.
\nType: Object
\nYou can configure only one of the following fields:
\n\n logicalStackName
\n
\n resourceGroupName
\n
\n terraformSourceName
\n
\n eksSourceName
\n
Each logicalResourceIds
object includes the following fields:
\n identifier
\n
Identifier of the resource.
\nType: String
\n\n logicalStackName
\n
The name of the CloudFormation stack this resource belongs to.
\nType: String
\n\n resourceGroupName
\n
The name of the resource group this resource belongs to.
\nType: String
\n\n terraformSourceName
\n
The name of the Terraform S3 state file this resource belongs to.
\nType: String
\n\n eksSourceName
\n
Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
\nThis parameter accepts values in \"eks-cluster/namespace\" format.
\nType: String
\n\n \n version
\n \n
Resilience Hub application version.
\n\n additionalInfo
\n
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo
through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
\nKey: \"failover-regions\"
\n
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
\n
Describes the status of importing resources to an application version.
\nIf you get a 404 error with\n ResourceImportStatusNotFoundAppMetadataException
, you must call\n importResourcesToDraftAppVersion
after creating the application and before\n calling describeDraftAppVersionResourcesImportStatus
to obtain the\n status.
Describes the status of importing resources to an application version.
\nIf you get a 404 error with\n ResourceImportStatusNotFoundAppMetadataException
, you must call\n importResourcesToDraftAppVersion
after creating the application and before\n calling describeDraftAppVersionResourcesImportStatus
to obtain the\n status.
Describes the resource grouping recommendation tasks run by Resilience Hub for your application.
", + "smithy.api#http": { + "method": "POST", + "uri": "/describe-resource-grouping-recommendation-task", + "code": 200 + } + } + }, + "com.amazonaws.resiliencehub#DescribeResourceGroupingRecommendationTaskRequest": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition
:resiliencehub:region
:account
:app/app-id
. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.
Indicates the identifier of the grouping recommendation task.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resiliencehub#DescribeResourceGroupingRecommendationTaskResponse": { + "type": "structure", + "members": { + "groupingId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "Indicates the identifier of the grouping recommendation task.
", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.resiliencehub#ResourcesGroupingRecGenStatusType", + "traits": { + "smithy.api#documentation": "Status of the action.
", + "smithy.api#required": {} + } + }, + "errorMessage": { + "target": "com.amazonaws.resiliencehub#String500", + "traits": { + "smithy.api#documentation": "Indicates the error that occurred while generating a grouping recommendation.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resiliencehub#DifferenceType": { "type": "string", "traits": { @@ -4714,6 +4994,12 @@ "smithy.api#pattern": "^[A-Za-z0-9][A-Za-z0-9_\\-]{1,59}$" } }, + "com.amazonaws.resiliencehub#EntityName255": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[A-Za-z0-9][A-Za-z0-9_\\-]{0,254}$" + } + }, "com.amazonaws.resiliencehub#EntityNameList": { "type": "list", "member": { @@ -4832,6 +5118,34 @@ ] } }, + "com.amazonaws.resiliencehub#FailedGroupingRecommendationEntries": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#FailedGroupingRecommendationEntry" + } + }, + "com.amazonaws.resiliencehub#FailedGroupingRecommendationEntry": { + "type": "structure", + "members": { + "groupingRecommendationId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "Indicates the identifier of the grouping recommendation.
", + "smithy.api#required": {} + } + }, + "errorMessage": { + "target": "com.amazonaws.resiliencehub#ErrorMessage", + "traits": { + "smithy.api#documentation": "Indicates the error that occurred while implementing a grouping recommendation.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Indicates the accepted grouping recommendation whose implementation failed.
" + } + }, "com.amazonaws.resiliencehub#FailurePolicy": { "type": "structure", "members": { @@ -4856,12 +5170,224 @@ "smithy.api#documentation": "Defines a failure policy.
" } }, - "com.amazonaws.resiliencehub#HaArchitecture": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "MultiSite", + "com.amazonaws.resiliencehub#GroupingAppComponent": { + "type": "structure", + "members": { + "appComponentId": { + "target": "com.amazonaws.resiliencehub#EntityName255", + "traits": { + "smithy.api#documentation": "Indicates the identifier of an AppComponent.
", + "smithy.api#required": {} + } + }, + "appComponentType": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "Indicates the type of an AppComponent.
", + "smithy.api#required": {} + } + }, + "appComponentName": { + "target": "com.amazonaws.resiliencehub#EntityName255", + "traits": { + "smithy.api#documentation": "Indicates the name of an AppComponent.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Creates a new recommended Application Component (AppComponent).
" + } + }, + "com.amazonaws.resiliencehub#GroupingRecommendation": { + "type": "structure", + "members": { + "groupingRecommendationId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "Indicates the identifier of the grouping recommendation.
", + "smithy.api#required": {} + } + }, + "groupingAppComponent": { + "target": "com.amazonaws.resiliencehub#GroupingAppComponent", + "traits": { + "smithy.api#documentation": "Indicates the name of the recommended Application Component (AppComponent).
", + "smithy.api#required": {} + } + }, + "resources": { + "target": "com.amazonaws.resiliencehub#GroupingResourceList", + "traits": { + "smithy.api#documentation": "Indicates the resources that are grouped in a recommended AppComponent.
", + "smithy.api#required": {} + } + }, + "score": { + "target": "com.amazonaws.resiliencehub#Double", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "Indicates the confidence level of the grouping recommendation.
", + "smithy.api#required": {} + } + }, + "recommendationReasons": { + "target": "com.amazonaws.resiliencehub#String255List", + "traits": { + "smithy.api#documentation": "Indicates all the reasons available for rejecting a grouping recommendation.
", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendationStatusType", + "traits": { + "smithy.api#documentation": "Indicates the status of grouping resources into AppComponents.
", + "smithy.api#required": {} + } + }, + "confidenceLevel": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendationConfidenceLevel", + "traits": { + "smithy.api#documentation": "Indicates the confidence level of Resilience Hub on the grouping recommendation.
", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "com.amazonaws.resiliencehub#TimeStamp", + "traits": { + "smithy.api#documentation": "Indicates the creation time of the grouping recommendation.
", + "smithy.api#required": {} + } + }, + "rejectionReason": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendationRejectionReason", + "traits": { + "smithy.api#documentation": "Indicates the reason you had selected while rejecting a grouping recommendation.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Creates a new grouping recommendation.
" + } + }, + "com.amazonaws.resiliencehub#GroupingRecommendationConfidenceLevel": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "HIGH", + "value": "High" + }, + { + "name": "MEDIUM", + "value": "Medium" + } + ] + } + }, + "com.amazonaws.resiliencehub#GroupingRecommendationList": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendation" + } + }, + "com.amazonaws.resiliencehub#GroupingRecommendationRejectionReason": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "DISTINCT_BUSINESS_PURPOSE", + "value": "DistinctBusinessPurpose" + }, + { + "name": "SEPARATE_DATA_CONCERN", + "value": "SeparateDataConcern" + }, + { + "name": "DISTINCT_USER_GROUP_HANDLING", + "value": "DistinctUserGroupHandling" + }, + { + "name": "OTHER", + "value": "Other" + } + ] + } + }, + "com.amazonaws.resiliencehub#GroupingRecommendationStatusType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "ACCEPTED", + "value": "Accepted" + }, + { + "name": "REJECTED", + "value": "Rejected" + }, + { + "name": "PENDING_DECISION", + "value": "PendingDecision" + } + ] + } + }, + "com.amazonaws.resiliencehub#GroupingResource": { + "type": "structure", + "members": { + "resourceName": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "Indicates the resource name.
", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "Indicates the resource type.
", + "smithy.api#required": {} + } + }, + "physicalResourceId": { + "target": "com.amazonaws.resiliencehub#PhysicalResourceId", + "traits": { + "smithy.api#documentation": "Indicates the physical identifier of the resource.
", + "smithy.api#required": {} + } + }, + "logicalResourceId": { + "target": "com.amazonaws.resiliencehub#LogicalResourceId", + "traits": { + "smithy.api#documentation": "Indicates the logical identifier of the resource.
", + "smithy.api#required": {} + } + }, + "sourceAppComponentIds": { + "target": "com.amazonaws.resiliencehub#String255List", + "traits": { + "smithy.api#documentation": "Indicates the identifier of the source AppComponents in which the resources were previously grouped into.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Indicates the resource that will be grouped in the recommended Application Component (AppComponent).
" + } + }, + "com.amazonaws.resiliencehub#GroupingResourceList": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#GroupingResource" + } + }, + "com.amazonaws.resiliencehub#HaArchitecture": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "MultiSite", "name": "MULTI_SITE" }, { @@ -4939,7 +5465,7 @@ } ], "traits": { - "smithy.api#documentation": "Imports resources to Resilience Hub application draft version from different input sources. For more information about the input sources supported by Resilience Hub, see Discover\n the structure and describe your Resilience Hub application.
", + "smithy.api#documentation": "Imports resources to Resilience Hub application draft version from different input\n sources. For more information about the input sources supported by Resilience Hub, see\n Discover the structure and describe your Resilience Hub application.
", "smithy.api#http": { "method": "POST", "uri": "/import-resources-to-draft-app-version", @@ -5016,7 +5542,7 @@ "terraformSources": { "target": "com.amazonaws.resiliencehub#TerraformSourceList", "traits": { - "smithy.api#documentation": "\n A list of terraform file s3 URLs you have imported.\n
" + "smithy.api#documentation": "A list of terraform file s3 URLs you have imported.
" } }, "eksSources": { @@ -5118,7 +5644,7 @@ "alarmRecommendations": { "target": "com.amazonaws.resiliencehub#AlarmRecommendationList", "traits": { - "smithy.api#documentation": "The alarm recommendations for an Resilience Hub application, returned as an object. This\n object includes Application Component names, descriptions, information about whether a\n recommendation has already been implemented or not, prerequisites, and more.
", + "smithy.api#documentation": "The alarm recommendations for an Resilience Hub application, returned as an\n object. This object includes Application Component names, descriptions, information about whether a\n recommendation has already been implemented or not, prerequisites, and more.
", "smithy.api#required": {} } }, @@ -5179,13 +5705,13 @@ "nextToken": { "target": "com.amazonaws.resiliencehub#NextToken", "traits": { - "smithy.api#documentation": "Indicates the unique token number of the next application to be checked for compliance and regulatory requirements from the list of applications.
" + "smithy.api#documentation": "Null, or the token from a previous call to get the next set of results.
" } }, "maxResults": { "target": "com.amazonaws.resiliencehub#MaxResults", "traits": { - "smithy.api#documentation": "Indicates the maximum number of applications requested.
" + "smithy.api#documentation": "Indicates the maximum number of compliance drifts requested.
" } } }, @@ -5199,14 +5725,14 @@ "complianceDrifts": { "target": "com.amazonaws.resiliencehub#ComplianceDriftList", "traits": { - "smithy.api#documentation": "Indicates compliance drifts (recovery time objective (RTO) and recovery point objective (RPO)) detected for an assessed entity.
", + "smithy.api#documentation": "Indicates compliance drifts (recovery time objective (RTO) and recovery point objective\n (RPO)) detected for an assessed entity.
", "smithy.api#required": {} } }, "nextToken": { "target": "com.amazonaws.resiliencehub#NextToken", "traits": { - "smithy.api#documentation": "Token number of the next application to be checked for compliance and regulatory requirements from the list of applications.
" + "smithy.api#documentation": "Null, or the token from a previous call to get the next set of results.
" } } }, @@ -5237,7 +5763,7 @@ } ], "traits": { - "smithy.api#documentation": "Indicates the list of resource drifts that were detected while running an assessment.
", + "smithy.api#documentation": "Indicates the list of resource drifts that were detected while running an\n assessment.
", "smithy.api#http": { "method": "POST", "uri": "/list-app-assessment-resource-drifts", @@ -5270,7 +5796,7 @@ "maxResults": { "target": "com.amazonaws.resiliencehub#MaxResults", "traits": { - "smithy.api#documentation": "Indicates the maximum number of drift results to include in the response. If more results exist than the specified MaxResults
value, a token is included in the response so that the remaining results can be retrieved.
Indicates the maximum number of drift results to include in the response. If more results\n exist than the specified MaxResults
value, a token is included in the response so\n that the remaining results can be retrieved.
Lists the assessments for an Resilience Hub application. You can use request parameters to\n refine the results for the response object.
", + "smithy.api#documentation": "Lists the assessments for an Resilience Hub application. You can use request\n parameters to refine the results for the response object.
", "smithy.api#http": { "method": "GET", "uri": "/list-app-assessments", @@ -5487,7 +6013,7 @@ "componentCompliances": { "target": "com.amazonaws.resiliencehub#ComponentCompliancesList", "traits": { - "smithy.api#documentation": "The compliances for an Resilience Hub Application Component, returned as an object. This\n object contains the names of the Application Components, compliances, costs, resiliency scores, outage scores, and\n more.
", + "smithy.api#documentation": "The compliances for an Resilience Hub Application Component, returned as an object. This\n object contains the names of the Application Components, compliances, costs, resiliency scores,\n outage scores, and more.
", "smithy.api#required": {} } }, @@ -5568,7 +6094,7 @@ "componentRecommendations": { "target": "com.amazonaws.resiliencehub#ComponentRecommendationList", "traits": { - "smithy.api#documentation": "The recommendations for an Resilience Hub Application Component, returned as an object. This\n object contains the names of the Application Components, configuration recommendations, and recommendation\n statuses.
", + "smithy.api#documentation": "The recommendations for an Resilience Hub Application Component, returned as an object.\n This object contains the names of the Application Components, configuration recommendations, and\n recommendation statuses.
", "smithy.api#required": {} } }, @@ -5606,7 +6132,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists all the input sources of the Resilience Hub application. For more information about the\n input sources supported by Resilience Hub, see Discover\n the structure and describe your Resilience Hub application.
", + "smithy.api#documentation": "Lists all the input sources of the Resilience Hub application. For more\n information about the input sources supported by Resilience Hub, see Discover\n the structure and describe your Resilience Hub application.
", "smithy.api#http": { "method": "POST", "uri": "/list-app-input-sources", @@ -5645,7 +6171,7 @@ "maxResults": { "target": "com.amazonaws.resiliencehub#MaxResults", "traits": { - "smithy.api#documentation": "Maximum number of input sources to be displayed per Resilience Hub application.
" + "smithy.api#documentation": "Maximum number of input sources to be displayed per Resilience Hub\n application.
" } } } @@ -6125,21 +6651,21 @@ "fromLastAssessmentTime": { "target": "com.amazonaws.resiliencehub#TimeStamp", "traits": { - "smithy.api#documentation": "Indicates the lower limit of the range that is used to filter applications based on their last assessment times.
", + "smithy.api#documentation": "Indicates the lower limit of the range that is used to filter applications based on their\n last assessment times.
", "smithy.api#httpQuery": "fromLastAssessmentTime" } }, "toLastAssessmentTime": { "target": "com.amazonaws.resiliencehub#TimeStamp", "traits": { - "smithy.api#documentation": "Indicates the upper limit of the range that is used to filter the applications based on their last assessment times.
", + "smithy.api#documentation": "Indicates the upper limit of the range that is used to filter the applications based on\n their last assessment times.
", "smithy.api#httpQuery": "toLastAssessmentTime" } }, "reverseOrder": { "target": "com.amazonaws.resiliencehub#BooleanOptional", "traits": { - "smithy.api#documentation": "The application list is sorted based on the values of lastAppComplianceEvaluationTime
field. By default, application list is sorted in ascending order. To sort the appliation list in descending order, set this field to True
.
The application list is sorted based on the values of\n lastAppComplianceEvaluationTime
field. By default, application list is sorted\n in ascending order. To sort the application list in descending order, set this field to\n True
.
Lists the resource grouping recommendations suggested by Resilience Hub for your application.
", + "smithy.api#http": { + "method": "GET", + "uri": "/list-resource-grouping-recommendations", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "groupingRecommendations" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.resiliencehub#ListResourceGroupingRecommendationsRequest": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition
:resiliencehub:region
:account
:app/app-id
. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.
Null, or the token from a previous call to get the next set of results.
", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.resiliencehub#MaxResults", + "traits": { + "smithy.api#documentation": "Maximum number of grouping recommendations to be displayed per Resilience Hub application.
", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resiliencehub#ListResourceGroupingRecommendationsResponse": { + "type": "structure", + "members": { + "groupingRecommendations": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendationList", + "traits": { + "smithy.api#documentation": "List of resource grouping recommendations generated by Resilience Hub.
", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.resiliencehub#NextToken", + "traits": { + "smithy.api#documentation": "Null, or the token from a previous call to get the next set of results.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resiliencehub#ListSopRecommendations": { "type": "operation", "input": { @@ -6384,7 +7001,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the standard operating procedure (SOP) recommendations for the Resilience Hub\n applications.
", + "smithy.api#documentation": "Lists the standard operating procedure (SOP) recommendations for the Resilience Hub applications.
", "smithy.api#http": { "method": "POST", "uri": "/list-sop-recommendations", @@ -6465,7 +7082,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the suggested resiliency policies for the Resilience Hub applications.
", + "smithy.api#documentation": "Lists the suggested resiliency policies for the Resilience Hub\n applications.
", "smithy.api#http": { "method": "GET", "uri": "/list-suggested-resiliency-policies", @@ -6688,7 +7305,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the resources that are not currently supported in Resilience Hub. An unsupported\n resource is a resource that exists in the object that was used to create an app, but is not\n supported by Resilience Hub.
", + "smithy.api#documentation": "Lists the resources that are not currently supported in Resilience Hub. An\n unsupported resource is a resource that exists in the object that was used to create an app,\n but is not supported by Resilience Hub.
", "smithy.api#http": { "method": "POST", "uri": "/list-unsupported-app-version-resources", @@ -7102,7 +7719,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds or updates the app template for an Resilience Hub application draft version.
", + "smithy.api#documentation": "Adds or updates the app template for an Resilience Hub application draft\n version.
", "smithy.api#http": { "method": "POST", "uri": "/put-draft-app-version-template", @@ -7170,6 +7787,10 @@ { "value": "MetCanImprove", "name": "MET_CAN_IMPROVE" + }, + { + "value": "MissingPolicy", + "name": "MISSING_POLICY" } ] } @@ -7439,6 +8060,117 @@ } } }, + "com.amazonaws.resiliencehub#RejectGroupingRecommendationEntries": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#RejectGroupingRecommendationEntry" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 30 + } + } + }, + "com.amazonaws.resiliencehub#RejectGroupingRecommendationEntry": { + "type": "structure", + "members": { + "groupingRecommendationId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "Indicates the identifier of the grouping recommendation.
", + "smithy.api#required": {} + } + }, + "rejectionReason": { + "target": "com.amazonaws.resiliencehub#GroupingRecommendationRejectionReason", + "traits": { + "smithy.api#documentation": "Indicates the reason you had selected while rejecting a grouping recommendation.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Indicates the rejected grouping recommendation.
" + } + }, + "com.amazonaws.resiliencehub#RejectResourceGroupingRecommendations": { + "type": "operation", + "input": { + "target": "com.amazonaws.resiliencehub#RejectResourceGroupingRecommendationsRequest" + }, + "output": { + "target": "com.amazonaws.resiliencehub#RejectResourceGroupingRecommendationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.resiliencehub#AccessDeniedException" + }, + { + "target": "com.amazonaws.resiliencehub#InternalServerException" + }, + { + "target": "com.amazonaws.resiliencehub#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.resiliencehub#ThrottlingException" + }, + { + "target": "com.amazonaws.resiliencehub#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Rejects resource grouping recommendations.
", + "smithy.api#http": { + "method": "POST", + "uri": "/reject-resource-grouping-recommendations", + "code": 200 + } + } + }, + "com.amazonaws.resiliencehub#RejectResourceGroupingRecommendationsRequest": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition
:resiliencehub:region
:account
:app/app-id
. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.
Indicates the list of resource grouping recommendations you have selected to exclude from your application.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resiliencehub#RejectResourceGroupingRecommendationsResponse": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition
:resiliencehub:region
:account
:app/app-id
. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.
Indicates the list of resource grouping recommendations that failed to get excluded in your application.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resiliencehub#RemoveDraftAppVersionResourceMappings": { "type": "operation", "input": { @@ -7501,7 +8233,7 @@ "appRegistryAppNames": { "target": "com.amazonaws.resiliencehub#EntityNameList", "traits": { - "smithy.api#documentation": "The names of the registered applications you want to remove from the resource mappings.
" + "smithy.api#documentation": "The names of the registered applications you want to remove from the resource\n mappings.
" } }, "resourceGroupNames": { @@ -7519,7 +8251,7 @@ "eksSourceNames": { "target": "com.amazonaws.resiliencehub#String255List", "traits": { - "smithy.api#documentation": "The names of the Amazon Elastic Kubernetes Service clusters and namespaces you want to remove from the resource mappings.
\nThis parameter accepts values in \"eks-cluster/namespace\" format.
\nThe names of the Amazon Elastic Kubernetes Service clusters and namespaces you want to remove from\n the resource mappings.
\nThis parameter accepts values in \"eks-cluster/namespace\" format.
\nThis indicates if there are more errors not listed in the resourceErrors list.
" + "smithy.api#documentation": " This indicates if there are more errors not listed in the\n resourceErrors
\n list.
Starts grouping recommendation task.
", + "smithy.api#http": { + "method": "POST", + "uri": "/start-resource-grouping-recommendation-task", + "code": 200 + } + } + }, + "com.amazonaws.resiliencehub#StartResourceGroupingRecommendationTaskRequest": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition
:resiliencehub:region
:account
:app/app-id
. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: \narn:partition
:resiliencehub:region
:account
:app/app-id
. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.
Indicates the identifier of the grouping recommendation task.
", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.resiliencehub#ResourcesGroupingRecGenStatusType", + "traits": { + "smithy.api#documentation": "Status of the action.
", + "smithy.api#required": {} + } + }, + "errorMessage": { + "target": "com.amazonaws.resiliencehub#String500", + "traits": { + "smithy.api#documentation": "Indicates the error that occurred while executing a grouping recommendation task.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resiliencehub#String1024": { "type": "string", "traits": { @@ -8933,7 +9775,7 @@ "assessmentSchedule": { "target": "com.amazonaws.resiliencehub#AppAssessmentScheduleType", "traits": { - "smithy.api#documentation": "\n Assessment execution schedule with 'Daily' or 'Disabled' values.\n
" + "smithy.api#documentation": "Assessment execution schedule with 'Daily' or 'Disabled' values.
" } }, "permissionModel": { @@ -8945,7 +9787,7 @@ "eventSubscriptions": { "target": "com.amazonaws.resiliencehub#EventSubscriptionList", "traits": { - "smithy.api#documentation": "The list of events you would like to subscribe and get notification for.\n Currently, Resilience Hub supports notifications only for Drift\n detected and Scheduled assessment failure\n events.
" + "smithy.api#documentation": "The list of events you would like to subscribe and get notification for. Currently,\n Resilience Hub supports notifications only for Drift\n detected and Scheduled assessment failure\n events.
" } } } @@ -8991,7 +9833,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the Resilience Hub application version.
\nThis API updates the Resilience Hub application draft version. To use this information\n for running resiliency assessments, you must publish the Resilience Hub application using the\n PublishAppVersion
API.
Updates the Resilience Hub application version.
\nThis API updates the Resilience Hub application draft version. To use this\n information for running resiliency assessments, you must publish the Resilience Hub\n application using the PublishAppVersion
API.
Updates an existing Application Component in the Resilience Hub application.
\nThis API updates the Resilience Hub application draft version. To use this Application Component for running assessments, you must publish the Resilience Hub application using the PublishAppVersion
API.
Updates an existing Application Component in the Resilience Hub application.
\nThis API updates the Resilience Hub application draft version. To use this\n Application Component for running assessments, you must publish the Resilience Hub\n application using the PublishAppVersion
API.
Updates the resource details in the Resilience Hub application.
\nThis action has no effect outside Resilience Hub.
\nThis API updates the Resilience Hub application draft version. To use this resource for running resiliency assessments, you must publish the Resilience Hub application using the PublishAppVersion
API.
To update application version with new physicalResourceID
, you must call\n ResolveAppVersionResources
API.
Updates the resource details in the Resilience Hub application.
\nThis action has no effect outside Resilience Hub.
\nThis API updates the Resilience Hub application draft version. To use this\n resource for running resiliency assessments, you must publish the Resilience Hub\n application using the PublishAppVersion
API.
To update application version with new physicalResourceID
, you must\n call ResolveAppVersionResources
API.
Indicates if a resource is excluded from an Resilience Hub application.
\nYou can exclude only imported resources from an Resilience Hub application.
\nIndicates if a resource is excluded from an Resilience Hub application.
\nYou can exclude only imported resources from an Resilience Hub\n application.
\nUpdates a resiliency policy.
\nResilience Hub allows you to provide a value of zero for rtoInSecs
and\n rpoInSecs
of your resiliency policy. But, while assessing your application,\n the lowest possible assessment result is near zero. Hence, if you provide value\n zero for rtoInSecs
and rpoInSecs
, the estimated workload RTO and\n estimated workload RPO result will be near zero and the Compliance\n status for your application will be set to Policy\n breached.
Updates a resiliency policy.
\nResilience Hub allows you to provide a value of zero for rtoInSecs
\n and rpoInSecs
of your resiliency policy. But, while assessing your application,\n the lowest possible assessment result is near zero. Hence, if you provide value zero for\n rtoInSecs
and rpoInSecs
, the estimated workload RTO and\n estimated workload RPO result will be near zero and the Compliance\n status for your application will be set to Policy\n breached.
The tags to attach to the profile.
" } + }, + "acceptRoleSessionName": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Used to determine if a custom role session name will be accepted in a temporary credential request.
" + } } } }, @@ -405,6 +411,7 @@ }, "output": { "profile": { + "acceptRoleSessionName": false, "attributeMappings": [ { "mappingRules": [ @@ -1533,6 +1540,12 @@ "smithy.api#documentation": " Used to determine how long sessions vended using this profile are valid for. See the Expiration
section of the \nCreateSession API documentation\npage for more details. In requests, if this value is not provided, the default value will be 3600.
Used to determine if a custom role session name will be accepted in a temporary credential request.
" + } + }, "attributeMappings": { "target": "com.amazonaws.rolesanywhere#AttributeMappings", "traits": { @@ -1596,6 +1609,7 @@ }, "output": { "profile": { + "acceptRoleSessionName": false, "attributeMappings": [ { "mappingRules": [ @@ -3560,6 +3574,12 @@ "max": 43200 } } + }, + "acceptRoleSessionName": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Used to determine if a custom role session name will be accepted in a temporary credential request.
" + } } } }, diff --git a/models/route-53.json b/models/route-53.json index 85c14d670f..ef1d914499 100644 --- a/models/route-53.json +++ b/models/route-53.json @@ -2175,91 +2175,6 @@ "traits": { "smithy.api#documentation": "Creates, changes, or deletes a resource record set, which contains authoritative DNS\n\t\t\tinformation for a specified domain name or subdomain name. For example, you can use\n\t\t\t\tChangeResourceRecordSets
to create a resource record set that routes\n\t\t\ttraffic for test.example.com to a web server that has an IP address of\n\t\t\t192.0.2.44.
\n Deleting Resource Record Sets\n
\nTo delete a resource record set, you must specify all the same values that you\n\t\t\tspecified when you created it.
\n\n Change Batches and Transactional Changes\n
\nThe request body must include a document with a\n\t\t\t\tChangeResourceRecordSetsRequest
element. The request body contains a\n\t\t\tlist of change items, known as a change batch. Change batches are considered\n\t\t\ttransactional changes. Route 53 validates the changes in the request and then either\n\t\t\tmakes all or none of the changes in the change batch request. This ensures that DNS\n\t\t\trouting isn't adversely affected by partial changes to the resource record sets in a\n\t\t\thosted zone.
For example, suppose a change batch request contains two changes: it deletes the\n\t\t\t\tCNAME
resource record set for www.example.com and creates an alias\n\t\t\tresource record set for www.example.com. If validation for both records succeeds, Route\n\t\t\t53 deletes the first resource record set and creates the second resource record set in a\n\t\t\tsingle operation. If validation for either the DELETE
or the\n\t\t\t\tCREATE
action fails, then the request is canceled, and the original\n\t\t\t\tCNAME
record continues to exist.
If you try to delete the same resource record set more than once in a single\n\t\t\t\tchange batch, Route 53 returns an InvalidChangeBatch
error.
\n Traffic Flow\n
\nTo create resource record sets for complex routing configurations, use either the\n\t\t\ttraffic flow visual editor in the Route 53 console or the API actions for traffic\n\t\t\tpolicies and traffic policy instances. Save the configuration as a traffic policy, then\n\t\t\tassociate the traffic policy with one or more domain names (such as example.com) or\n\t\t\tsubdomain names (such as www.example.com), in the same hosted zone or in multiple hosted\n\t\t\tzones. You can roll back the updates if the new configuration isn't performing as\n\t\t\texpected. For more information, see Using Traffic Flow to Route\n\t\t\t\tDNS Traffic in the Amazon Route 53 Developer\n\t\t\tGuide.
\n\n Create, Delete, and Upsert\n
\nUse ChangeResourceRecordsSetsRequest
to perform the following\n\t\t\tactions:
\n CREATE
: Creates a resource record set that has the specified\n\t\t\t\t\tvalues.
\n DELETE
: Deletes an existing resource record set that has the\n\t\t\t\t\tspecified values.
\n UPSERT
: If a resource set doesn't exist, Route 53 creates it. If a resource\n\t\t\t\t\tset exists Route 53 updates it with the values in the request.
\n Syntaxes for Creating, Updating, and Deleting Resource Record\n\t\t\t\tSets\n
\nThe syntax for a request depends on the type of resource record set that you want to\n\t\t\tcreate, delete, or update, such as weighted, alias, or failover. The XML elements in\n\t\t\tyour request must appear in the order listed in the syntax.
\nFor an example for each type of resource record set, see \"Examples.\"
\nDon't refer to the syntax in the \"Parameter Syntax\" section, which includes\n\t\t\tall of the elements for every kind of resource record set that you can create, delete,\n\t\t\tor update by using ChangeResourceRecordSets
.
\n Change Propagation to Route 53 DNS Servers\n
\nWhen you submit a ChangeResourceRecordSets
request, Route 53 propagates your\n\t\t\tchanges to all of the Route 53 authoritative DNS servers managing the hosted zone. While\n\t\t\tyour changes are propagating, GetChange
returns a status of\n\t\t\t\tPENDING
. When propagation is complete, GetChange
returns a\n\t\t\tstatus of INSYNC
. Changes generally propagate to all Route 53 name servers\n\t\t\tmanaging the hosted zone within 60 seconds. For more information, see GetChange.
\n Limits on ChangeResourceRecordSets Requests\n
\nFor information about the limits on a ChangeResourceRecordSets
request,\n\t\t\tsee Limits in the Amazon Route 53 Developer Guide.
This operation aborts a multipart upload. After a multipart upload is aborted, no\n additional parts can be uploaded using that upload ID. The storage consumed by any\n previously uploaded parts will be freed. However, if any part uploads are currently in\n progress, those part uploads might or might not succeed. As a result, it might be necessary\n to abort a given multipart upload multiple times in order to completely free all storage\n consumed by all parts.
\nTo verify that all parts have been removed and prevent getting charged for the part\n storage, you should call the ListParts API operation and ensure that\n the parts list is empty.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.
\n\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to AbortMultipartUpload
:
\n UploadPart\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nThis operation aborts a multipart upload. After a multipart upload is aborted, no\n additional parts can be uploaded using that upload ID. The storage consumed by any\n previously uploaded parts will be freed. However, if any part uploads are currently in\n progress, those part uploads might or might not succeed. As a result, it might be necessary\n to abort a given multipart upload multiple times in order to completely free all storage\n consumed by all parts.
\nTo verify that all parts have been removed and prevent getting charged for the part\n storage, you should call the ListParts API operation and ensure that\n the parts list is empty.
\n\n Directory buckets - \n If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. \n To delete these in-progress multipart uploads, use the\n ListMultipartUploads
operation to list the in-progress multipart\n uploads in the bucket and use the AbortMultupartUpload
operation to\n abort all the in-progress multipart uploads.\n
\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.
\n\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to AbortMultipartUpload
:
\n UploadPart\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nCreates a copy of an object that is already stored in Amazon S3.
\nYou can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.
\nYou can copy individual objects between general purpose buckets, between directory buckets, and \n between general purpose buckets and directory buckets.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
Both the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable \n or disable a Region for standalone accounts in the\n Amazon Web Services Account Management Guide.
\nAmazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request
error. For more information, see Transfer\n Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed. For more information, see REST Authentication.
\n Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the \n temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
\nYou must have\n read access to the source object and write\n access to the destination bucket.
\n\n General purpose bucket permissions -\n You must have permissions in an IAM policy based on the source and destination\n bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have\n \n s3:GetObject
\n \n permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have\n \n s3:PutObject
\n \n permission to write the object copy to the destination bucket.
\n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in a CopyObject
operation.
If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession
\n permission in\n the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the \n s3express:CreateSession
\n permission in the\n Action
element of a policy to write the object\n to the destination. The s3express:SessionMode
condition\n key can't be set to ReadOnly
on the copy destination bucket.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.
\nWhen the request is an HTTP 1.1 request, the response is chunk encoded. When\n the request is not an HTTP 1.1 request, the response would not contain the\n Content-Length
. You always need to read the entire response body\n to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied\n object.
\nA copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error.
\nIf the error occurs during the copy operation, the error response is\n embedded in the 200 OK
response. For example, in a cross-region copy, you \n may encounter throttling and receive a 200 OK
response. \n For more information, see Resolve \n the Error 200 response when copying objects to Amazon S3. \n The 200 OK
status code means the copy was accepted, but \n it doesn't mean the copy is complete. Another example is \n when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. \n You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make\n sure to design your application to parse the content of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throw an exception (or, for the SDKs that don't use exceptions, they return an \n error).
\nThe copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see\n Amazon S3 pricing.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CopyObject
:
Creates a copy of an object that is already stored in Amazon S3.
\nYou can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.
\nYou can copy individual objects between general purpose buckets, between directory buckets, and \n between general purpose buckets and directory buckets.
\nAmazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.
\nBoth the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable \n or disable a Region for standalone accounts in the\n Amazon Web Services Account Management Guide.
\nAmazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request
error. For more information, see Transfer\n Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed. For more information, see REST Authentication.
\n Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the \n temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
\nYou must have\n read access to the source object and write\n access to the destination bucket.
\n\n General purpose bucket permissions -\n You must have permissions in an IAM policy based on the source and destination\n bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have\n \n s3:GetObject
\n \n permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have\n \n s3:PutObject
\n \n permission to write the object copy to the destination bucket.
\n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in a CopyObject
operation.
If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession
\n permission in\n the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the \n s3express:CreateSession
\n permission in the\n Action
element of a policy to write the object\n to the destination. The s3express:SessionMode
condition\n key can't be set to ReadOnly
on the copy destination bucket.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.
\nWhen the request is an HTTP 1.1 request, the response is chunk encoded. When\n the request is not an HTTP 1.1 request, the response would not contain the\n Content-Length
. You always need to read the entire response body\n to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied\n object.
\nA copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error.
\nIf the error occurs during the copy operation, the error response is\n embedded in the 200 OK
response. For example, in a cross-region copy, you \n may encounter throttling and receive a 200 OK
response. \n For more information, see Resolve \n the Error 200 response when copying objects to Amazon S3. \n The 200 OK
status code means the copy was accepted, but \n it doesn't mean the copy is complete. Another example is \n when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. \n You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make\n sure to design your application to parse the content of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throw an exception (or, for the SDKs that don't use exceptions, they return an \n error).
\nThe copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see\n Amazon S3 pricing.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CopyObject
:
The container element for specifying the default Object Lock retention settings for new\n objects placed in the specified bucket.
\nThe DefaultRetention
settings require both a mode and a\n period.
The DefaultRetention
period can be either Days
or\n Years
but you must select one. You cannot specify\n Days
and Years
at the same time.
The container element for optionally specifying the default Object Lock retention settings for new\n objects placed in the specified bucket.
\nThe DefaultRetention
settings require both a mode and a\n period.
The DefaultRetention
period can be either Days
or\n Years
but you must select one. You cannot specify\n Days
and Years
at the same time.
Requests Amazon S3 to encode the object keys in the response and specifies the encoding\n method to use. An object key can contain any Unicode character; however, the XML 1.0 parser\n cannot parse some characters, such as characters with an ASCII value from 0 to 10. For\n characters that are not supported in XML 1.0, you can add this parameter to request that\n Amazon S3 encode the keys in the response.
" + "smithy.api#documentation": "Encoding type used by Amazon S3 to encode the object keys in the response.\n Responses are encoded only in UTF-8. An object key can contain any Unicode character.\n However, the XML 1.0 parser can't parse certain characters, such as characters with an\n ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this\n parameter to request that Amazon S3 encode the keys in the response. For more information about\n characters to avoid in object key names, see Object key naming\n guidelines.
\nWhen using the URL encoding type, non-ASCII characters that are used in an object's\n key name will be percent-encoded according to UTF-8 code values. For example, the object\n test_file(3).png
will appear as\n test_file%283%29.png
.
Specifies encryption-related information for an Amazon S3 bucket that is a destination for\n replicated objects.
" + "smithy.api#documentation": "Specifies encryption-related information for an Amazon S3 bucket that is a destination for\n replicated objects.
\nIf you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.
\nYou can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK
if the bucket exists and you have permission\n to access it.
If the bucket does not exist or you do not have permission to access it, the\n HEAD
request returns a generic 400 Bad Request
, 403\n Forbidden
or 404 Not Found
code. A message body is not included, so\n you cannot determine the exception beyond these HTTP response codes.
\n Directory buckets - You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
All HeadBucket
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed. For more information, see REST Authentication.
\n Directory bucket - You must use IAM credentials to authenticate and authorize your access to the HeadBucket
API operation, instead of using the \n temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
\n\n General purpose bucket permissions - To use this operation, you must have permissions to perform the\n s3:ListBucket
action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Managing\n access permissions to your Amazon S3 resources in the Amazon S3 User Guide.
\n Directory bucket permissions -\n You must have the \n s3express:CreateSession
\n permission in the\n Action
element of a policy. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the bucket.
For more information about example bucket policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK
if the bucket exists and you have permission\n to access it.
If the bucket does not exist or you do not have permission to access it, the\n HEAD
request returns a generic 400 Bad Request
, 403\n Forbidden
or 404 Not Found
code. A message body is not included, so\n you cannot determine the exception beyond these HTTP response codes.
\n General purpose buckets - Request to public buckets that grant the s3:ListBucket permission publicly do not need to be signed. All other HeadBucket
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed. For more information, see REST Authentication.
\n Directory buckets - You must use IAM credentials to authenticate and authorize your access to the HeadBucket
API operation, instead of using the \n temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
\n\n General purpose bucket permissions - To use this operation, you must have permissions to perform the\n s3:ListBucket
action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Managing\n access permissions to your Amazon S3 resources in the Amazon S3 User Guide.
\n Directory bucket permissions -\n You must have the \n s3express:CreateSession
\n permission in the\n Action
element of a policy. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the bucket.
For more information about example bucket policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
The Region that the bucket is located.
\nThis functionality is not supported for directory buckets.
\nThe Region that the bucket is located.
", "smithy.api#httpHeader": "x-amz-bucket-region" } }, "AccessPointAlias": { "target": "com.amazonaws.s3#AccessPointAlias", "traits": { - "smithy.api#documentation": "Indicates whether the bucket name used in the request is an access point alias.
\nThis functionality is not supported for directory buckets.
\nIndicates whether the bucket name used in the request is an access point alias.
\nFor directory buckets, the value of this field is false
.
The HEAD
operation retrieves metadata from an object without returning the\n object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an\n object. The response is identical to the GET
response except that there is no\n response body. Because of this, if the HEAD
request generates an error, it\n returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not\n Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. \n It's not possible to retrieve the exact exception of these error codes.
Request headers are limited to 8 KB in size. For more information, see Common\n Request Headers.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - To\n use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation.\n For more information, see Actions, resources, and condition\n keys for Amazon S3 in the Amazon S3\n User Guide.
If the object you request doesn't exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns\n an HTTP status code 403 Forbidden
error.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
Encryption request headers, like x-amz-server-side-encryption
,\n should not be sent for HEAD
requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. \n If you include this header in a HEAD
request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request
error. It's because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.
\n\n Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
\n Directory buckets - Delete marker is not supported by directory buckets.
\n\n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
\n to the versionId
query parameter in the request.
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following actions are related to HeadObject
:
\n GetObject\n
\n\n GetObjectAttributes\n
\nThe HEAD
operation retrieves metadata from an object without returning the\n object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an\n object. The response is identical to the GET
response except that there is no\n response body. Because of this, if the HEAD
request generates an error, it\n returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not\n Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. \n It's not possible to retrieve the exact exception of these error codes.
Request headers are limited to 8 KB in size. For more information, see Common\n Request Headers.
\n\n General purpose bucket permissions - To\n use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation.\n For more information, see Actions, resources, and condition\n keys for Amazon S3 in the Amazon S3\n User Guide.
If the object you request doesn't exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns\n an HTTP status code 403 Forbidden
error.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
Encryption request headers, like x-amz-server-side-encryption
,\n should not be sent for HEAD
requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. \n If you include this header in a HEAD
request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request
error. It's because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.
\n\n Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
\n Directory buckets - Delete marker is not supported by directory buckets.
\n\n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
\n to the versionId
query parameter in the request.
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
The following actions are related to HeadObject
:
\n GetObject\n
\n\n GetObjectAttributes\n
\nThe owner of the buckets listed.
" } + }, + "ContinuationToken": { + "target": "com.amazonaws.s3#NextToken", + "traits": { + "smithy.api#documentation": "\n ContinuationToken
is included in the\n response when there are more buckets that can be listed with pagination. The next ListBuckets
request to Amazon S3 can be continued with this ContinuationToken
. ContinuationToken
is obfuscated and is not a real bucket.
Maximum number of buckets to be returned in response. When the number is more than the count of buckets that are owned by an Amazon Web Services account, return all the buckets in response.
", + "smithy.api#httpQuery": "max-buckets" + } + }, + "ContinuationToken": { + "target": "com.amazonaws.s3#Token", + "traits": { + "smithy.api#documentation": "\n ContinuationToken
indicates to Amazon S3 that the list is being continued on\n this bucket with a token. ContinuationToken
is obfuscated and is not a real\n key. You can use this ContinuationToken
for pagination of the list results.
Length Constraints: Minimum length of 0. Maximum length of 1024.
\nRequired: No.
", + "smithy.api#httpQuery": "continuation-token" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.s3#ListDirectoryBuckets": { "type": "operation", "input": { @@ -26519,7 +26553,7 @@ "ContinuationToken": { "target": "com.amazonaws.s3#DirectoryBucketToken", "traits": { - "smithy.api#documentation": "\n ContinuationToken
indicates to Amazon S3 that the list is being continued on\n this bucket with a token. ContinuationToken
is obfuscated and is not a real\n key. You can use this ContinuationToken
for pagination of the list results.
\n ContinuationToken
indicates to Amazon S3 that the list is being continued on buckets in this account with a token. ContinuationToken
is obfuscated and is not a real\n bucket name. You can use this ContinuationToken
for the pagination of the list results.
This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a\n multipart upload that has been initiated by the CreateMultipartUpload
request, but\n has not yet been completed or aborted.
\n Directory buckets - \n If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.\n
\nThe ListMultipartUploads
operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart\n uploads is also the default\n value. You can further limit the number of uploads in a response by specifying the\n max-uploads
request parameter. If there are more than 1,000 multipart uploads that \n satisfy your ListMultipartUploads
request, the response returns an IsTruncated
element\n with the value of true
, a NextKeyMarker
element, and a NextUploadIdMarker
element. \n To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads
requests. \n In these requests, include two query parameters: key-marker
and upload-id-marker
. \n Set the value of key-marker
to the NextKeyMarker
value from the previous response. \n Similarly, set the value of upload-id-marker
to the NextUploadIdMarker
value from the previous response.
\n Directory buckets - The upload-id-marker
element and \n the NextUploadIdMarker
element aren't supported by directory buckets. \n To list the additional multipart uploads, you only need to set the value of key-marker
to the NextKeyMarker
value from the previous response.
For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload in the Amazon S3\n User Guide.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.
\n\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
\n General purpose bucket - In the ListMultipartUploads
response, the multipart uploads are sorted based on two criteria:
Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.
\nTime-based sorting - For uploads that share the same object key, \n they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.
\n\n Directory bucket - In the ListMultipartUploads
response, the multipart uploads aren't sorted lexicographically based on the object keys. \n \n
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to ListMultipartUploads
:
\n UploadPart\n
\n\n ListParts\n
\n\n AbortMultipartUpload\n
\nThis operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a\n multipart upload that has been initiated by the CreateMultipartUpload
request, but\n has not yet been completed or aborted.
\n Directory buckets - \n If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. \n To delete these in-progress multipart uploads, use the ListMultipartUploads
operation to list the in-progress multipart\n uploads in the bucket and use the AbortMultupartUpload
operation to abort all the in-progress multipart uploads.\n
The ListMultipartUploads
operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart\n uploads is also the default\n value. You can further limit the number of uploads in a response by specifying the\n max-uploads
request parameter. If there are more than 1,000 multipart uploads that \n satisfy your ListMultipartUploads
request, the response returns an IsTruncated
element\n with the value of true
, a NextKeyMarker
element, and a NextUploadIdMarker
element. \n To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads
requests. \n In these requests, include two query parameters: key-marker
and upload-id-marker
. \n Set the value of key-marker
to the NextKeyMarker
value from the previous response. \n Similarly, set the value of upload-id-marker
to the NextUploadIdMarker
value from the previous response.
\n Directory buckets - The upload-id-marker
element and \n the NextUploadIdMarker
element aren't supported by directory buckets. \n To list the additional multipart uploads, you only need to set the value of key-marker
to the NextKeyMarker
value from the previous response.
For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload in the Amazon S3\n User Guide.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.
\n\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
\n General purpose bucket - In the ListMultipartUploads
response, the multipart uploads are sorted based on two criteria:
Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.
\nTime-based sorting - For uploads that share the same object key, \n they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.
\n\n Directory bucket - In the ListMultipartUploads
response, the multipart uploads aren't sorted lexicographically based on the object keys. \n \n
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to ListMultipartUploads
:
\n UploadPart\n
\n\n ListParts\n
\n\n AbortMultipartUpload\n
\nEncoding type used by Amazon S3 to encode object keys in the response. If using\n url
, non-ASCII characters used in an object's key name will be URL encoded.\n For example, the object test_file(3).png
will appear as\n test_file%283%29.png
.
Encoding type used by Amazon S3 to encode the object keys in the response.\n Responses are encoded only in UTF-8. An object key can contain any Unicode character.\n However, the XML 1.0 parser can't parse certain characters, such as characters with an\n ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this\n parameter to request that Amazon S3 encode the keys in the response. For more information about\n characters to avoid in object key names, see Object key naming\n guidelines.
\nWhen using the URL encoding type, non-ASCII characters that are used in an object's\n key name will be percent-encoded according to UTF-8 code values. For example, the object\n test_file(3).png
will appear as\n test_file%283%29.png
.
Returns some or all (up to 1,000) of the objects in a bucket with each request. You can\n use the request parameters as selection criteria to return a subset of the objects in a\n bucket. A 200 OK
response can contain valid or invalid XML. Make sure to\n design your application to parse the contents of the response and handle it appropriately.\n \n For more information about listing objects, see Listing object keys\n programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.
\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform\n the s3:ListBucket
action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
\n General purpose bucket - For general purpose buckets, ListObjectsV2
returns objects in lexicographical order based on their key names.
\n Directory bucket - For directory buckets, ListObjectsV2
does not return objects in lexicographical order.
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
This section describes the latest revision of this action. We recommend that you use\n this revised API operation for application development. For backward compatibility, Amazon S3\n continues to support the prior version of this API operation, ListObjects.
\nThe following operations are related to ListObjectsV2
:
\n GetObject\n
\n\n PutObject\n
\n\n CreateBucket\n
\nReturns some or all (up to 1,000) of the objects in a bucket with each request. You can\n use the request parameters as selection criteria to return a subset of the objects in a\n bucket. A 200 OK
response can contain valid or invalid XML. Make sure to\n design your application to parse the contents of the response and handle it appropriately.\n \n For more information about listing objects, see Listing object keys\n programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.
\n General purpose bucket - For general purpose buckets, ListObjectsV2
doesn't return prefixes that are related only to in-progress multipart uploads.
\n Directory buckets - \n For directory buckets, ListObjectsV2
response includes the prefixes that are related only to in-progress multipart uploads.\n
\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform\n the s3:ListBucket
action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
\n General purpose bucket - For general purpose buckets, ListObjectsV2
returns objects in lexicographical order based on their key names.
\n Directory bucket - For directory buckets, ListObjectsV2
does not return objects in lexicographical order.
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
This section describes the latest revision of this action. We recommend that you use\n this revised API operation for application development. For backward compatibility, Amazon S3\n continues to support the prior version of this API operation, ListObjects.
\nThe following operations are related to ListObjectsV2
:
\n GetObject\n
\n\n PutObject\n
\n\n CreateBucket\n
\nEncoding type used by Amazon S3 to encode object keys in the response. If using\n url
, non-ASCII characters used in an object's key name will be URL encoded.\n For example, the object test_file(3).png
will appear as\n test_file%283%29.png
.
Encoding type used by Amazon S3 to encode the object keys in the response.\n Responses are encoded only in UTF-8. An object key can contain any Unicode character.\n However, the XML 1.0 parser can't parse certain characters, such as characters with an\n ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this\n parameter to request that Amazon S3 encode the keys in the response. For more information about\n characters to avoid in object key names, see Object key naming\n guidelines.
\nWhen using the URL encoding type, non-ASCII characters that are used in an object's\n key name will be percent-encoded according to UTF-8 code values. For example, the object\n test_file(3).png
will appear as\n test_file%283%29.png
.
Specifies the partition date source for the partitioned prefix. PartitionDateSource can be EventTime or DeliveryTime.
" + "smithy.api#documentation": "Specifies the partition date source for the partitioned prefix.\n PartitionDateSource
can be EventTime
or\n DeliveryTime
.
For DeliveryTime
, the time in the log file names corresponds to the\n delivery time for the log files.
For EventTime
, The logs delivered are for a specific day only. The year,\n month, and day correspond to the day on which the event occurred, and the hour, minutes and\n seconds are set to 00 in the key.
Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting\n this element to TRUE
restricts access to this bucket to only Amazon Web Service principals and authorized users within this account if the bucket has\n a public policy.
Enabling this setting doesn't affect previously stored bucket policies, except that\n public and cross-account access within any public bucket policy, including non-public\n delegation to specific accounts, is blocked.
", + "smithy.api#documentation": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting\n this element to TRUE
restricts access to this bucket to only Amazon Web Servicesservice principals and authorized users within this account if the bucket has\n a public policy.
Enabling this setting doesn't affect previously stored bucket policies, except that\n public and cross-account access within any public bucket policy, including non-public\n delegation to specific accounts, is blocked.
", "smithy.api#xmlName": "RestrictPublicBuckets" } } @@ -29519,7 +29562,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "This operation is not supported by directory buckets.
\nThis action uses the encryption
subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.
By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket\n Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.
\nThis action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).
\nTo use this operation, you must have permission to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
The following operations are related to PutBucketEncryption
:
\n GetBucketEncryption\n
\nThis operation is not supported by directory buckets.
\nThis action uses the encryption
subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.
By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket\n Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.
\nIf you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.
\nAlso, this action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).
\nTo use this operation, you must have permission to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
The following operations are related to PutBucketEncryption
:
\n GetBucketEncryption\n
\nThis operation is not supported by directory buckets.
\nSets the versioning state of an existing bucket.
\nYou can set the versioning state with one of the following values:
\n\n Enabled—Enables versioning for the objects in the\n bucket. All objects added to the bucket receive a unique version ID.
\n\n Suspended—Disables versioning for the objects in the\n bucket. All objects added to the bucket receive the version ID null.
\nIf the versioning state has never been set on a bucket, it has no versioning state; a\n GetBucketVersioning request does not return a versioning state value.
\nIn order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner\n and want to enable MFA Delete in the bucket versioning configuration, you must include the\n x-amz-mfa request
header and the Status
and the\n MfaDelete
request elements in a request to set the versioning state of the\n bucket.
If you have an object expiration lifecycle configuration in your non-versioned bucket\n and you want to maintain the same permanent delete behavior when you enable versioning,\n you must add a noncurrent expiration policy. The noncurrent expiration lifecycle\n configuration will manage the deletes of the noncurrent object versions in the\n version-enabled bucket. (A version-enabled bucket maintains one current and zero or more\n noncurrent object versions.) For more information, see Lifecycle and Versioning.
\nThe following operations are related to PutBucketVersioning
:
\n CreateBucket\n
\n\n DeleteBucket\n
\n\n GetBucketVersioning\n
\nThis operation is not supported by directory buckets.
\nWhen you enable versioning on a bucket for the first time, it might take a short\n amount of time for the change to be fully propagated. We recommend that you wait for 15\n minutes after enabling versioning before issuing write operations\n (PUT
\n or\n DELETE
)\n on objects in the bucket.
Sets the versioning state of an existing bucket.
\nYou can set the versioning state with one of the following values:
\n\n Enabled—Enables versioning for the objects in the\n bucket. All objects added to the bucket receive a unique version ID.
\n\n Suspended—Disables versioning for the objects in the\n bucket. All objects added to the bucket receive the version ID null.
\nIf the versioning state has never been set on a bucket, it has no versioning state; a\n GetBucketVersioning request does not return a versioning state value.
\nIn order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner\n and want to enable MFA Delete in the bucket versioning configuration, you must include the\n x-amz-mfa request
header and the Status
and the\n MfaDelete
request elements in a request to set the versioning state of the\n bucket.
If you have an object expiration lifecycle configuration in your non-versioned bucket\n and you want to maintain the same permanent delete behavior when you enable versioning,\n you must add a noncurrent expiration policy. The noncurrent expiration lifecycle\n configuration will manage the deletes of the noncurrent object versions in the\n version-enabled bucket. (A version-enabled bucket maintains one current and zero or more\n noncurrent object versions.) For more information, see Lifecycle and Versioning.
\nThe following operations are related to PutBucketVersioning
:
\n CreateBucket\n
\n\n DeleteBucket\n
\n\n GetBucketVersioning\n
\nThe byte array of partial, one or more result records.
", + "smithy.api#documentation": "The byte array of partial, one or more result records. S3 Select doesn't guarantee that\n a record will be self-contained in one record frame. To ensure continuous streaming of\n data, S3 Select might split the same record across multiple record frames instead of\n aggregating the results in memory. Some S3 clients (for example, the SDK for Java) handle this behavior by creating a ByteStream
out of the response by\n default. Other clients might not handle this behavior by default. In those cases, you must\n aggregate the results on the client side and parse the response.
Describes the default server-side encryption to apply to new objects in the bucket. If a\n PUT Object request doesn't specify any server-side encryption, this default encryption will\n be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates\n an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted\n with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more\n information, see PUT Bucket encryption in\n the Amazon S3 API Reference.
" + "smithy.api#documentation": "Describes the default server-side encryption to apply to new objects in the bucket. If a\n PUT Object request doesn't specify any server-side encryption, this default encryption will\n be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates\n an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted\n with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more\n information, see PUT Bucket encryption in\n the Amazon S3 API Reference.
\nIf you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.
\nSpecifies the default server-side encryption configuration.
" + "smithy.api#documentation": "Specifies the default server-side encryption configuration.
\nIf you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.
\nLists a summary of the properties of an association. An association is an entity that\n links other lineage or experiment entities. An example would be an association between a\n training job and a model.
" } }, + "com.amazonaws.sagemaker#AssumableRoleArns": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#RoleArn" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, "com.amazonaws.sagemaker#AsyncInferenceClientConfig": { "type": "structure", "members": { @@ -3052,7 +3064,7 @@ "target": "com.amazonaws.sagemaker#AutoMLAlgorithms", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The selection of algorithms trained on your dataset to generate the model candidates for\n an Autopilot job.
\n\n For the tabular problem type TabularJobConfig
:\n
Selected algorithms must belong to the list corresponding to the training mode\n set in AutoMLJobConfig.Mode (ENSEMBLING
or\n HYPERPARAMETER_TUNING
). Choose a minimum of 1 algorithm.
In ENSEMBLING
mode:
\"catboost\"
\n\"extra-trees\"
\n\"fastai\"
\n\"lightgbm\"
\n\"linear-learner\"
\n\"nn-torch\"
\n\"randomforest\"
\n\"xgboost\"
\nIn HYPERPARAMETER_TUNING
mode:
\"linear-learner\"
\n\"mlp\"
\n\"xgboost\"
\n\n For the time-series forecasting problem type TimeSeriesForecastingJobConfig
:\n
Choose your algorithms from this list.
\n\"cnn-qr\"
\n\"deepar\"
\n\"prophet\"
\n\"arima\"
\n\"npts\"
\n\"ets\"
\nThe selection of algorithms trained on your dataset to generate the model candidates for\n an Autopilot job.
\n\n For the tabular problem type\n TabularJobConfig
:\n
Selected algorithms must belong to the list corresponding to the training mode\n set in AutoMLJobConfig.Mode (ENSEMBLING
or\n HYPERPARAMETER_TUNING
). Choose a minimum of 1 algorithm.
In ENSEMBLING
mode:
\"catboost\"
\n\"extra-trees\"
\n\"fastai\"
\n\"lightgbm\"
\n\"linear-learner\"
\n\"nn-torch\"
\n\"randomforest\"
\n\"xgboost\"
\nIn HYPERPARAMETER_TUNING
mode:
\"linear-learner\"
\n\"mlp\"
\n\"xgboost\"
\n\n For the time-series forecasting problem type\n TimeSeriesForecastingJobConfig
:\n
Choose your algorithms from this list.
\n\"cnn-qr\"
\n\"deepar\"
\n\"prophet\"
\n\"arima\"
\n\"npts\"
\n\"ets\"
\nStores the configuration information for the selection of algorithms trained on tabular data.
\nThe list of available algorithms to choose from depends on the training mode set in\n \n TabularJobConfig.Mode
\n .
\n AlgorithmsConfig
should not be set if the training mode is set on AUTO
.
When AlgorithmsConfig
is provided, one AutoMLAlgorithms
\n attribute must be set and one only.
If the list of algorithms provided as values for AutoMLAlgorithms
is\n empty, CandidateGenerationConfig
uses the full set of algorithms for the\n given training mode.
When AlgorithmsConfig
is not provided,\n CandidateGenerationConfig
uses the full set of algorithms for the\n given training mode.
For the list of all algorithms per problem type and training mode, see \n AutoMLAlgorithmConfig.
\nFor more information on each algorithm, see the Algorithm support section in Autopilot developer guide.
" + "smithy.api#documentation": "Stores the configuration information for the selection of algorithms trained on tabular\n data.
\nThe list of available algorithms to choose from depends on the training mode set in\n \n TabularJobConfig.Mode
\n .
\n AlgorithmsConfig
should not be set if the training mode is set on\n AUTO
.
When AlgorithmsConfig
is provided, one AutoMLAlgorithms
\n attribute must be set and one only.
If the list of algorithms provided as values for AutoMLAlgorithms
is\n empty, CandidateGenerationConfig
uses the full set of algorithms for the\n given training mode.
When AlgorithmsConfig
is not provided,\n CandidateGenerationConfig
uses the full set of algorithms for the\n given training mode.
For the list of all algorithms per problem type and training mode, see \n AutoMLAlgorithmConfig.
\nFor more information on each algorithm, see the Algorithm support section in Autopilot developer guide.
" } } }, @@ -3295,6 +3307,20 @@ } } }, + "com.amazonaws.sagemaker#AutoMLComputeConfig": { + "type": "structure", + "members": { + "EmrServerlessComputeConfig": { + "target": "com.amazonaws.sagemaker#EmrServerlessComputeConfig", + "traits": { + "smithy.api#documentation": "The configuration for using EMR Serverless\n to run the AutoML job V2.
\nTo allow your AutoML job V2 to automatically initiate a remote job on EMR Serverless\n when additional compute resources are needed to process large datasets, you need to provide\n an EmrServerlessComputeConfig
object, which includes an\n ExecutionRoleARN
attribute, to the AutoMLComputeConfig
of the\n AutoML job V2 input request.
By seamlessly transitioning to EMR Serverless when required, the AutoML job can handle\n datasets that would otherwise exceed the initially provisioned resources, without any\n manual intervention from you.
\nEMR Serverless is available for the tabular and time series problem types. We\n recommend setting up this option for tabular datasets larger than 5 GB and time series\n datasets larger than 30 GB.
" + } + } + }, + "traits": { + "smithy.api#documentation": "This data type is intended for use exclusively by SageMaker Canvas and cannot be used in\n other contexts at the moment.
\nSpecifies the compute configuration for an AutoML job V2.
" + } + }, "com.amazonaws.sagemaker#AutoMLContainerDefinition": { "type": "structure", "members": { @@ -4171,7 +4197,7 @@ "target": "com.amazonaws.sagemaker#S3Uri", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The Amazon S3 output path. Must be 128 characters or less.
", + "smithy.api#documentation": "The Amazon S3 output path. Must be 512 characters or less.
", "smithy.api#required": {} } } @@ -5055,7 +5081,7 @@ "AlgorithmsConfig": { "target": "com.amazonaws.sagemaker#AutoMLAlgorithmsConfig", "traits": { - "smithy.api#documentation": "Your Autopilot job trains a default set of algorithms on your dataset. For tabular and\n time-series data, you can customize the algorithm list by selecting a subset of algorithms\n for your problem type.
\n\n AlgorithmsConfig
stores the customized selection of algorithms to train on\n your data.
\n For the tabular problem type TabularJobConfig
,\n the list of available algorithms to choose from depends on the training mode set\n in \n AutoMLJobConfig.Mode
\n .
\n AlgorithmsConfig
should not be set when the training mode\n AutoMLJobConfig.Mode
is set to AUTO
.
When AlgorithmsConfig
is provided, one\n AutoMLAlgorithms
attribute must be set and one only.
If the list of algorithms provided as values for\n AutoMLAlgorithms
is empty,\n CandidateGenerationConfig
uses the full set of algorithms for\n the given training mode.
When AlgorithmsConfig
is not provided,\n CandidateGenerationConfig
uses the full set of algorithms for\n the given training mode.
For the list of all algorithms per training mode, see \n AlgorithmConfig.
\nFor more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.
\n\n For the time-series forecasting problem type TimeSeriesForecastingJobConfig
,\n choose your algorithms from the list provided in\n \n AlgorithmConfig.
For more information on each algorithm, see the Algorithms support for time-series forecasting section in the Autopilot developer guide.
\nWhen AlgorithmsConfig
is provided, one\n AutoMLAlgorithms
attribute must be set and one only.
If the list of algorithms provided as values for\n AutoMLAlgorithms
is empty,\n CandidateGenerationConfig
uses the full set of algorithms for\n time-series forecasting.
When AlgorithmsConfig
is not provided,\n CandidateGenerationConfig
uses the full set of algorithms for\n time-series forecasting.
Your Autopilot job trains a default set of algorithms on your dataset. For tabular and\n time-series data, you can customize the algorithm list by selecting a subset of algorithms\n for your problem type.
\n\n AlgorithmsConfig
stores the customized selection of algorithms to train on\n your data.
\n For the tabular problem type\n TabularJobConfig
, the list of available algorithms to\n choose from depends on the training mode set in \n AutoMLJobConfig.Mode
\n .
\n AlgorithmsConfig
should not be set when the training mode\n AutoMLJobConfig.Mode
is set to AUTO
.
When AlgorithmsConfig
is provided, one\n AutoMLAlgorithms
attribute must be set and one only.
If the list of algorithms provided as values for\n AutoMLAlgorithms
is empty,\n CandidateGenerationConfig
uses the full set of algorithms for\n the given training mode.
When AlgorithmsConfig
is not provided,\n CandidateGenerationConfig
uses the full set of algorithms for\n the given training mode.
For the list of all algorithms per training mode, see \n AlgorithmConfig.
\nFor more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.
\n\n For the time-series forecasting problem type\n TimeSeriesForecastingJobConfig
, choose your algorithms\n from the list provided in \n AlgorithmConfig.
For more information on each algorithm, see the Algorithms\n support for time-series forecasting section in the Autopilot developer\n guide.
\nWhen AlgorithmsConfig
is provided, one\n AutoMLAlgorithms
attribute must be set and one only.
If the list of algorithms provided as values for\n AutoMLAlgorithms
is empty,\n CandidateGenerationConfig
uses the full set of algorithms for\n time-series forecasting.
When AlgorithmsConfig
is not provided,\n CandidateGenerationConfig
uses the full set of algorithms for\n time-series forecasting.
The generative AI settings for the SageMaker Canvas application.
" } + }, + "EmrServerlessSettings": { + "target": "com.amazonaws.sagemaker#EmrServerlessSettings", + "traits": { + "smithy.api#documentation": "The settings for running Amazon EMR Serverless data processing jobs in SageMaker Canvas.
" + } } }, "traits": { @@ -8907,7 +8939,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.
\nWe recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.
\n\n CreateAutoMLJobV2
can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob
, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).
Find guidelines about how to migrate a CreateAutoMLJob
to\n CreateAutoMLJobV2
in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.
" + "smithy.api#documentation": "Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.
\nAn AutoML job in SageMaker is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.
\nFor more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker developer guide.
\nWe recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.
\n\n CreateAutoMLJobV2
can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob
, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).
Find guidelines about how to migrate a CreateAutoMLJob
to\n CreateAutoMLJobV2
in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.
" } }, "com.amazonaws.sagemaker#CreateAutoMLJobRequest": { @@ -9019,7 +9051,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.
\n\n CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob\n and DescribeAutoMLJob which offer backward compatibility.
\n\n CreateAutoMLJobV2
can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob
, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).
Find guidelines about how to migrate a CreateAutoMLJob
to\n CreateAutoMLJobV2
in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
For the list of available problem types supported by CreateAutoMLJobV2
, see\n AutoMLProblemTypeConfig.
You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.
" + "smithy.api#documentation": "Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.
\nAn AutoML job in SageMaker is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.
\nFor more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker developer guide.
\nAutoML jobs V2 support various problem types such as regression, binary, and multiclass\n classification with tabular data, text and image classification, time-series forecasting,\n and fine-tuning of large language models (LLMs) for text generation.
\n\n CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob\n and DescribeAutoMLJob which offer backward compatibility.
\n\n CreateAutoMLJobV2
can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob
, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).
Find guidelines about how to migrate a CreateAutoMLJob
to\n CreateAutoMLJobV2
in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
For the list of available problem types supported by CreateAutoMLJobV2
, see\n AutoMLProblemTypeConfig.
You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.
" } }, "com.amazonaws.sagemaker#CreateAutoMLJobV2Request": { @@ -9094,6 +9126,12 @@ "traits": { "smithy.api#documentation": "This structure specifies how to split the data into train and validation\n datasets.
\nThe validation and training datasets must contain the same headers. For jobs created by\n calling CreateAutoMLJob
, the validation dataset must be less than 2 GB in\n size.
This attribute must not be set for the time-series forecasting problem type, as Autopilot\n automatically splits the input dataset into training and validation sets.
\nSpecifies the compute configuration for the AutoML job V2.
" + } } }, "traits": { @@ -18091,6 +18129,12 @@ "traits": { "smithy.api#documentation": "Returns the security configuration for traffic encryption or Amazon VPC\n settings.
" } + }, + "AutoMLComputeConfig": { + "target": "com.amazonaws.sagemaker#AutoMLComputeConfig", + "traits": { + "smithy.api#documentation": "The compute configuration used for the AutoML job V2.
" + } } }, "traits": { @@ -27145,6 +27189,62 @@ "smithy.api#pattern": "^\\d+$" } }, + "com.amazonaws.sagemaker#EmrServerlessComputeConfig": { + "type": "structure", + "members": { + "ExecutionRoleARN": { + "target": "com.amazonaws.sagemaker#RoleArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ARN of the IAM role granting the AutoML job V2 the necessary\n permissions access policies to list, connect to, or manage EMR Serverless jobs. For\n detailed information about the required permissions of this role, see \"How to configure\n AutoML to initiate a remote job on EMR Serverless for large datasets\" in Create a regression or classification job for tabular data using the AutoML API\n or Create an AutoML job for time-series forecasting using the API.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "This data type is intended for use exclusively by SageMaker Canvas and cannot be used in\n other contexts at the moment.
\nSpecifies the compute configuration for the EMR Serverless job.
" + } + }, + "com.amazonaws.sagemaker#EmrServerlessSettings": { + "type": "structure", + "members": { + "ExecutionRoleArn": { + "target": "com.amazonaws.sagemaker#RoleArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon Web Services IAM role that is assumed for\n running Amazon EMR Serverless jobs in SageMaker Canvas. This role should have the necessary\n permissions to read and write data attached and a trust relationship with\n EMR Serverless.
" + } + }, + "Status": { + "target": "com.amazonaws.sagemaker#FeatureStatus", + "traits": { + "smithy.api#documentation": "Describes whether Amazon EMR Serverless job capabilities are enabled or disabled in the SageMaker\n Canvas application.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The settings for running Amazon EMR Serverless jobs in SageMaker Canvas.
" + } + }, + "com.amazonaws.sagemaker#EmrSettings": { + "type": "structure", + "members": { + "AssumableRoleArns": { + "target": "com.amazonaws.sagemaker#AssumableRoleArns", + "traits": { + "smithy.api#documentation": "An array of Amazon Resource Names (ARNs) of the IAM roles that the execution role of\n SageMaker can assume for performing operations or tasks related to Amazon EMR clusters or Amazon EMR\n Serverless applications. These roles define the permissions and access policies required\n when performing Amazon EMR-related operations, such as listing, connecting to, or terminating\n Amazon EMR clusters or Amazon EMR Serverless applications. They are typically used in\n cross-account access scenarios, where the Amazon EMR resources (clusters or serverless\n applications) are located in a different Amazon Web Services account than the SageMaker\n domain.
" + } + }, + "ExecutionRoleArns": { + "target": "com.amazonaws.sagemaker#ExecutionRoleArns", + "traits": { + "smithy.api#documentation": "An array of Amazon Resource Names (ARNs) of the IAM roles used by the Amazon EMR cluster instances\n or job execution environments to access other Amazon Web Services services and resources needed during the \n runtime of your Amazon EMR or Amazon EMR Serverless workloads, such as Amazon S3 for data access, Amazon CloudWatch for logging, or other\n Amazon Web Services services based on the particular workload requirements.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration parameters that specify the IAM roles assumed by the execution role of \n SageMaker (assumable roles) and the cluster instances or job execution environments \n (execution roles or runtime roles) to manage and access resources required for running Amazon EMR\n clusters or Amazon EMR Serverless applications.
" + } + }, "com.amazonaws.sagemaker#EnableCapture": { "type": "boolean" }, @@ -27349,6 +27449,20 @@ } } }, + "com.amazonaws.sagemaker#EndpointConfigStepMetadata": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.sagemaker#EndpointConfigArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the endpoint configuration used in the step.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Metadata for an endpoint configuration step.
" + } + }, "com.amazonaws.sagemaker#EndpointConfigSummary": { "type": "structure", "members": { @@ -27730,6 +27844,20 @@ } } }, + "com.amazonaws.sagemaker#EndpointStepMetadata": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.sagemaker#EndpointArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the endpoint in the step.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Metadata for an endpoint step.
" + } + }, "com.amazonaws.sagemaker#EndpointSummary": { "type": "structure", "members": { @@ -27918,6 +28046,18 @@ } } }, + "com.amazonaws.sagemaker#ExecutionRoleArns": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#RoleArn" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, "com.amazonaws.sagemaker#ExecutionRoleIdentityConfig": { "type": "enum", "members": { @@ -35095,6 +35235,12 @@ "traits": { "smithy.api#documentation": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.
" } + }, + "EmrSettings": { + "target": "com.amazonaws.sagemaker#EmrSettings", + "traits": { + "smithy.api#documentation": "The configuration parameters that specify the IAM roles assumed by the execution role of \n SageMaker (assumable roles) and the cluster instances or job execution environments \n (execution roles or runtime roles) to manage and access resources required for running Amazon EMR\n clusters or Amazon EMR Serverless applications.
" + } } }, "traits": { @@ -44263,6 +44409,12 @@ "traits": { "smithy.api#enumValue": "Projects" } + }, + "INFERENCE_OPTIMIZATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InferenceOptimization" + } } } }, @@ -50842,6 +50994,18 @@ "traits": { "smithy.api#documentation": "The Amazon Resource Name (ARN) of the AutoML job that was run by this step.
" } + }, + "Endpoint": { + "target": "com.amazonaws.sagemaker#EndpointStepMetadata", + "traits": { + "smithy.api#documentation": "The endpoint that was invoked during this step execution.
" + } + }, + "EndpointConfig": { + "target": "com.amazonaws.sagemaker#EndpointConfigStepMetadata", + "traits": { + "smithy.api#documentation": "The endpoint configuration used to create an endpoint during this step execution.
" + } } }, "traits": { @@ -52105,9 +52269,7 @@ "LocalPath": { "target": "com.amazonaws.sagemaker#ProcessingLocalPath", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The local path of a directory where you want Amazon SageMaker to upload its contents to Amazon S3. \n LocalPath
is an absolute path to a directory containing output files. \n This directory will be created by the platform and exist when your container's \n entrypoint is invoked.
The local path of a directory where you want Amazon SageMaker to upload its contents to Amazon S3. \n LocalPath
is an absolute path to a directory containing output files. \n This directory will be created by the platform and exist when your container's \n entrypoint is invoked.
Specifies an option from a collection of preconfigured Amazon Machine Image (AMI)\n images. Each image is configured by Amazon Web Services with a set of software and driver\n versions. Amazon Web Services optimizes these configurations for different machine\n learning workloads.
\nBy selecting an AMI version, you can ensure that your inference environment is\n compatible with specific software requirements, such as CUDA driver versions, Linux\n kernel versions, or Amazon Web Services Neuron driver versions.
" + "smithy.api#documentation": "Specifies an option from a collection of preconfigured Amazon Machine Image (AMI)\n images. Each image is configured by Amazon Web Services with a set of software and driver\n versions. Amazon Web Services optimizes these configurations for different machine\n learning workloads.
\nBy selecting an AMI version, you can ensure that your inference environment is\n compatible with specific software requirements, such as CUDA driver versions, Linux\n kernel versions, or Amazon Web Services Neuron driver versions.
\nThe AMI version names, and their configurations, are the following:
\nAccelerator: GPU
\nNVIDIA driver version: 535.54.03
\nCUDA driver version: 12.2
\nSupported instance types: ml.g4dn.*, ml.g5.*, ml.g6.*, ml.p3.*,\n ml.p4d.*, ml.p4de.*, ml.p5.*
\nA description of the types of email that you plan to send.
", - "smithy.api#required": {} + "smithy.api#documentation": "A description of the types of email that you plan to send.
" } }, "AdditionalContactEmailAddresses": { @@ -12462,8 +12461,11 @@ "com.amazonaws.sesv2#UseCaseDescription": { "type": "string", "traits": { + "smithy.api#deprecated": { + "message": "Use case description is optional and deprecated" + }, "smithy.api#length": { - "min": 1, + "min": 0, "max": 5000 }, "smithy.api#sensitive": {} diff --git a/models/sfn.json b/models/sfn.json index ec94cc0dae..09f12b100c 100644 --- a/models/sfn.json +++ b/models/sfn.json @@ -158,7 +158,7 @@ "name": "states" }, "aws.protocols#awsJson1_0": {}, - "smithy.api#documentation": "Step Functions is a service that lets you coordinate the components of distributed applications\n and microservices using visual workflows.
\nYou can use Step Functions to build applications from individual components, each of which performs\n a discrete function, or task, allowing you to scale and change\n applications quickly. Step Functions provides a console that helps visualize the components of your\n application as a series of steps. Step Functions automatically triggers and tracks each step, and\n retries steps when there are errors, so your application executes predictably and in the right\n order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any\n issues.
\nStep Functions manages operations and underlying infrastructure to ensure your application is\n available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has\n access to Amazon Web Services. You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API.\n For more information about Step Functions, see the \n Step Functions Developer Guide\n .
\nIf you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution
and specify its parameter as StateMachineArn
.
Step Functions coordinates the components of distributed applications\n and microservices using visual workflows.
\nYou can use Step Functions to build applications from individual components, each of which performs\n a discrete function, or task, allowing you to scale and change\n applications quickly. Step Functions provides a console that helps visualize the components of your\n application as a series of steps. Step Functions automatically triggers and tracks each step, and\n retries steps when there are errors, so your application executes predictably and in the right\n order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any\n issues.
\nStep Functions manages operations and underlying infrastructure to ensure your application is\n available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has\n access to Amazon Web Services. You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API.\n For more information about Step Functions, see the \n Step Functions Developer Guide\n .
\nIf you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution
and specify its parameter as StateMachineArn
.
Activity already exists. EncryptionConfiguration
may not be updated.
The list of tags to add to a resource.
\nAn array of key-value pairs. For more information, see Using\n Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User\n Guide, and Controlling Access Using IAM\n Tags.
\nTags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @
.
Settings to configure server-side encryption.
" + } } }, "traits": { @@ -1598,6 +1628,9 @@ { "target": "com.amazonaws.sfn#InvalidDefinition" }, + { + "target": "com.amazonaws.sfn#InvalidEncryptionConfiguration" + }, { "target": "com.amazonaws.sfn#InvalidLoggingConfiguration" }, @@ -1607,6 +1640,12 @@ { "target": "com.amazonaws.sfn#InvalidTracingConfiguration" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#StateMachineAlreadyExists" }, @@ -1627,7 +1666,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a state machine. A state machine consists of a collection of states that can do\n work (Task
states), determine to which states to transition next\n (Choice
states), stop an execution with an error (Fail
states),\n and so on. State machines are specified using a JSON-based, structured language. For more\n information, see Amazon States\n Language in the Step Functions User Guide.
If you set the publish
parameter of this API action to true
, it\n publishes version 1
as the first revision of the state machine.
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
\n\n CreateStateMachine
is an idempotent API. Subsequent requests won’t create a\n duplicate resource if it was already created. CreateStateMachine
's idempotency\n check is based on the state machine name
, definition
,\n type
, LoggingConfiguration
, and\n TracingConfiguration
. The check is also based on the publish
and versionDescription
parameters. If a following request has a different\n roleArn
or tags
, Step Functions will ignore these differences and treat\n it as an idempotent request of the previous. In this case, roleArn
and\n tags
will not be updated, even if they are different.
Creates a state machine. A state machine consists of a collection of states that can do\n work (Task
states), determine to which states to transition next\n (Choice
states), stop an execution with an error (Fail
states),\n and so on. State machines are specified using a JSON-based, structured language. For more\n information, see Amazon States\n Language in the Step Functions User Guide.
If you set the publish
parameter of this API action to true
, it\n publishes version 1
as the first revision of the state machine.
\n For additional control over security, you can encrypt your data using a customer-managed key for Step Functions state machines. You can configure a symmetric KMS key and data key reuse period when creating or updating a State Machine. The execution history and state machine definition will be encrypted with the key applied to the State Machine.\n
\nThis operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
\n\n CreateStateMachine
is an idempotent API. Subsequent requests won’t create a\n duplicate resource if it was already created. CreateStateMachine
's idempotency\n check is based on the state machine name
, definition
,\n type
, LoggingConfiguration
,\n TracingConfiguration
, and EncryptionConfiguration
The check is also based on the publish
and versionDescription
parameters. If a following request has a different\n roleArn
or tags
, Step Functions will ignore these differences and treat\n it as an idempotent request of the previous. In this case, roleArn
and\n tags
will not be updated, even if they are different.
Sets description about the state machine version. You can only set the description if the publish
parameter is set to true
. Otherwise, if you set versionDescription
, but publish
to false
, this API action throws ValidationException
.
Settings to configure server-side encryption.
" + } } }, "traits": { @@ -1969,7 +2014,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a state machine version. After\n you delete a version, you can't call StartExecution using that version's ARN\n or use\n the\n version with a state machine alias.
\nDeleting a state machine version won't terminate its in-progress executions.
\nYou can't delete a state machine version currently referenced by one or more aliases. Before you delete a version, you must either delete the aliases or update them to point to another state machine version.
\n\n Related operations:\n
\nDeletes a state machine version. After\n you delete a version, you can't call StartExecution using that version's ARN\n or use the version with a state machine alias.
\nDeleting a state machine version won't terminate its in-progress executions.
\nYou can't delete a state machine version currently referenced by one or more aliases. Before you delete a version, you must either delete the aliases or update them to point to another state machine version.
\n\n Related operations:\n
\nThe date the activity is created.
", "smithy.api#required": {} } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.sfn#EncryptionConfiguration", + "traits": { + "smithy.api#documentation": "Settings for configured server-side encryption.
" + } } }, "traits": { @@ -2072,6 +2123,15 @@ }, { "target": "com.amazonaws.sfn#InvalidArn" + }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" } ], "traits": { @@ -2087,6 +2147,12 @@ "smithy.api#documentation": "The Amazon Resource Name (ARN) of the execution to describe.
", "smithy.api#required": {} } + }, + "includedData": { + "target": "com.amazonaws.sfn#IncludedData", + "traits": { + "smithy.api#documentation": "If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt
permission to decrypt the definition. Alternatively, you can call DescribeStateMachine API with includedData = METADATA_ONLY
to get a successful response without the encrypted definition.
Provides information about a state machine's definition, its execution role ARN, and\n configuration. If a Map Run dispatched the execution, this action returns the Map Run\n Amazon Resource Name (ARN) in the response.\n The\n state machine returned is the state machine associated with the\n Map Run.
\nThis operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
\nThis API action is not supported by EXPRESS
state machines.
Provides information about a state machine's definition, its execution role ARN, and\n configuration. If a Map Run dispatched the execution, this action returns the Map Run\n Amazon Resource Name (ARN) in the response. The state machine returned is the state machine associated with the\n Map Run.
\nThis operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
\nThis API action is not supported by EXPRESS
state machines.
The Amazon Resource Name (ARN) of the execution you want state machine information for.
", "smithy.api#required": {} } + }, + "includedData": { + "target": "com.amazonaws.sfn#IncludedData", + "traits": { + "smithy.api#documentation": "If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt
permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY
to get a successful response without the encrypted definition.
The revision identifier for the state machine. The first revision ID when you create the state machine is null.
\nUse the state machine revisionId
parameter to compare the revision of a state machine with the configuration of the state machine used for executions without performing a diff of the properties, such as definition
and roleArn
.
Settings to configure server-side encryption.
" + } } }, "traits": { @@ -2562,6 +2658,12 @@ "smithy.api#documentation": "The Amazon Resource Name (ARN) of the state machine for which you want the information.
\nIf you specify a state machine version ARN, this API returns details about that version. The version ARN is a combination of state machine ARN and the version number separated by a colon (:). For example, stateMachineARN:1
.
If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt
permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY
to get a successful response without the encrypted definition.
\n When calling a labelled ARN for an encrypted state machine, the includedData = METADATA_ONLY
parameter will not apply because Step Functions needs to decrypt the entire state machine definition to get the Distributed Map state’s definition. In this case, the API caller needs to have kms:Decrypt
permission.\n
The Amazon States Language definition of the state machine. See Amazon States Language.
", + "smithy.api#documentation": "The Amazon States Language definition of the state machine. See Amazon States Language.
\nIf called with includedData = METADATA_ONLY
, the returned definition will be {}
.
The description of the state machine version.
" } + }, + "encryptionConfiguration": { + "target": "com.amazonaws.sfn#EncryptionConfiguration", + "traits": { + "smithy.api#documentation": "Settings to configure server-side encryption.
" + } } }, "traits": { @@ -2657,6 +2765,50 @@ "smithy.api#default": false } }, + "com.amazonaws.sfn#EncryptionConfiguration": { + "type": "structure", + "members": { + "kmsKeyId": { + "target": "com.amazonaws.sfn#KmsKeyId", + "traits": { + "smithy.api#documentation": "An alias, alias ARN, key ID, or key ARN of a symmetric encryption KMS key to encrypt data. To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.
" + } + }, + "kmsDataKeyReusePeriodSeconds": { + "target": "com.amazonaws.sfn#KmsDataKeyReusePeriodSeconds", + "traits": { + "smithy.api#documentation": "Maximum duration that Step Functions will reuse data keys. When the period expires, Step Functions will call GenerateDataKey
. Only applies to customer managed keys.
Encryption type
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Settings to configure server-side encryption.
\n\n For additional control over security, you can encrypt your data using a customer-managed key for Step Functions state machines and activities. You can configure a symmetric KMS key and data key reuse period when creating or updating a State Machine, and when creating an Activity. The execution history and state machine definition will be encrypted with the key applied to the State Machine. Activity inputs will be encrypted with the key applied to the Activity.\n
\nStep Functions automatically enables encryption at rest using Amazon Web Services owned keys at no charge. However, KMS charges apply when using a customer managed key. For more information about pricing, see Key Management Service pricing.
\nFor more information on KMS, see What is Key Management Service?\n
" + } + }, + "com.amazonaws.sfn#EncryptionType": { + "type": "enum", + "members": { + "AWS_OWNED_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_OWNED_KEY" + } + }, + "CUSTOMER_MANAGED_KMS_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOMER_MANAGED_KMS_KEY" + } + } + } + }, "com.amazonaws.sfn#ErrorMessage": { "type": "string" }, @@ -3036,6 +3188,15 @@ }, { "target": "com.amazonaws.sfn#InvalidArn" + }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" } ], "traits": { @@ -3100,6 +3261,15 @@ }, { "target": "com.amazonaws.sfn#InvalidToken" + }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" } ], "traits": { @@ -3810,6 +3980,23 @@ "com.amazonaws.sfn#IncludeExecutionDataGetExecutionHistory": { "type": "boolean" }, + "com.amazonaws.sfn#IncludedData": { + "type": "enum", + "members": { + "ALL_DATA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALL_DATA" + } + }, + "METADATA_ONLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "METADATA_ONLY" + } + } + } + }, "com.amazonaws.sfn#InspectionData": { "type": "structure", "members": { @@ -3990,6 +4177,18 @@ "smithy.api#error": "client" } }, + "com.amazonaws.sfn#InvalidEncryptionConfiguration": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.sfn#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "Received when encryptionConfiguration
is specified but various conditions exist which make the configuration invalid. For example, if type
is set to CUSTOMER_MANAGED_KMS_KEY
, but kmsKeyId
is null, or kmsDataKeyReusePeriodSeconds
is not between 60 and 900, or the KMS key is not symmetric, or the KMS key is inactive.
Configuration is not valid.
", "smithy.api#error": "client" } }, @@ -4062,6 +4261,101 @@ "smithy.api#error": "client" } }, + "com.amazonaws.sfn#KmsAccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.sfn#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "Either your KMS key policy or API caller does not have the required permissions.
", + "smithy.api#error": "client" + } + }, + "com.amazonaws.sfn#KmsDataKeyReusePeriodSeconds": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 60, + "max": 900 + } + } + }, + "com.amazonaws.sfn#KmsInvalidStateException": { + "type": "structure", + "members": { + "kmsKeyState": { + "target": "com.amazonaws.sfn#KmsKeyState", + "traits": { + "smithy.api#documentation": "Current status of the KMS; key. For example: DISABLED
, PENDING_DELETION
, PENDING_IMPORT
, UNAVAILABLE
, CREATING
.
The KMS key is not in a valid state, for example: Disabled or Deleted.
", + "smithy.api#error": "client" + } + }, + "com.amazonaws.sfn#KmsKeyId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, + "com.amazonaws.sfn#KmsKeyState": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "PENDING_DELETION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING_DELETION" + } + }, + "PENDING_IMPORT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING_IMPORT" + } + }, + "UNAVAILABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNAVAILABLE" + } + }, + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATING" + } + } + } + }, + "com.amazonaws.sfn#KmsThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.sfn#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "Received when KMS returns ThrottlingException
for a KMS call that Step Functions makes on behalf of the caller.
The percentage of traffic you want to route to a state machine\n version. The sum of the weights in the routing\n configuration must be equal to 100.
", + "smithy.api#documentation": "The percentage of traffic you want to route to a state machine version. The sum of the\n weights in the routing configuration must be equal to 100.
", "smithy.api#required": {} } } @@ -5445,6 +5739,15 @@ { "target": "com.amazonaws.sfn#InvalidToken" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#TaskDoesNotExist" }, @@ -5453,7 +5756,7 @@ } ], "traits": { - "smithy.api#documentation": "Used by activity workers, Task states using the callback\n pattern, and optionally Task states using the job run pattern to report that the task identified by the taskToken
failed.
Used by activity workers, Task states using the callback\n pattern, and optionally Task states using the job run pattern to report that the task identified by the taskToken
failed.
For an execution with encryption enabled, Step Functions will encrypt the error and cause fields using the KMS key for the execution role.
\nA caller can mark a task as fail without using any KMS permissions in the execution role if the caller provides a null value for both error
and cause
fields because no data needs to be encrypted.
Optional name of the execution.\n This name must be unique for your Amazon Web Services account, Region, and state machine for 90 days. For more information,\n see \n Limits Related to State Machine Executions in the Step Functions Developer Guide.
\nIf you don't provide a name for the execution, Step Functions automatically generates a universally unique identifier (UUID) as the execution name.
\nA name must not contain:
\nwhite space
\nbrackets < > { } [ ]
\n
wildcard characters ? *
\n
special characters \" # % \\ ^ | ~ ` $ & , ; : /
\n
control characters (U+0000-001F
, U+007F-009F
)
To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.
" + "smithy.api#documentation": "Optional name of the execution. This name must be unique for your Amazon Web Services account, Region, and state machine for 90 days. For more information,\n see \n Limits Related to State Machine Executions in the Step Functions Developer Guide.
\nIf you don't provide a name for the execution, Step Functions automatically generates a universally unique identifier (UUID) as the execution name.
\nA name must not contain:
\nwhite space
\nbrackets < > { } [ ]
\n
wildcard characters ? *
\n
special characters \" # % \\ ^ | ~ ` $ & , ; : /
\n
control characters (U+0000-001F
, U+007F-009F
)
To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.
" } }, "input": { @@ -5755,6 +6076,15 @@ { "target": "com.amazonaws.sfn#InvalidName" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#StateMachineDeleting" }, @@ -5799,6 +6129,12 @@ "traits": { "smithy.api#documentation": "Passes the X-Ray trace header. The trace header can also be passed in the request\n payload.
" } + }, + "includedData": { + "target": "com.amazonaws.sfn#IncludedData", + "traits": { + "smithy.api#documentation": "If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt
permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY
to get a successful response without the encrypted definition.
State machine type is not supported.
", "smithy.api#error": "client" } }, @@ -6165,12 +6501,21 @@ { "target": "com.amazonaws.sfn#InvalidArn" }, + { + "target": "com.amazonaws.sfn#KmsAccessDeniedException" + }, + { + "target": "com.amazonaws.sfn#KmsInvalidStateException" + }, + { + "target": "com.amazonaws.sfn#KmsThrottlingException" + }, { "target": "com.amazonaws.sfn#ValidationException" } ], "traits": { - "smithy.api#documentation": "Stops an execution.
\nThis API action is not supported by EXPRESS
state machines.
Stops an execution.
\nThis API action is not supported by EXPRESS
state machines.
For an execution with encryption enabled, Step Functions will encrypt the error and cause fields using the KMS key for the execution role.
\nA caller can stop an execution without using any KMS permissions in the execution role if the caller provides a null value for both error
and cause
fields because no data needs to be encrypted.
Updates an existing state machine by modifying its definition
,\n roleArn
, or loggingConfiguration
. Running executions will continue\n to use the previous definition
and roleArn
. You must include at\n least one of definition
or roleArn
or you will receive a\n MissingRequiredParameter
error.
A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel
refers to a Distributed Map state with a label mapStateLabel
in the state machine named stateMachineName
.
A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.
\nThe following are some examples of qualified and unqualified state machine ARNs:
\nThe following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel
in a state machine named myStateMachine
.
\n arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel
\n
If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException
.
The following qualified state machine ARN refers to an alias named PROD
.
\n arn:
\n
If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias.
\nThe following unqualified state machine ARN refers to a state machine named myStateMachine
.
\n arn:
\n
After you update your state machine, you can set the publish
parameter to\n true
in the same action to publish a new version. This\n way, you can opt-in to strict versioning of your state machine.
Step Functions assigns monotonically increasing integers for state machine versions, starting at version number 1.
\nAll StartExecution
calls within a few seconds use the updated\n definition
and roleArn
. Executions started immediately after you\n call UpdateStateMachine
may use the previous state machine\n definition
and roleArn
.
Updates an existing state machine by modifying its definition
,\n roleArn
, loggingConfiguration
, or EncryptionConfiguration
. Running executions will continue\n to use the previous definition
and roleArn
. You must include at\n least one of definition
or roleArn
or you will receive a\n MissingRequiredParameter
error.
A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel
refers to a Distributed Map state with a label mapStateLabel
in the state machine named stateMachineName
.
A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.
\nThe following are some examples of qualified and unqualified state machine ARNs:
\nThe following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel
in a state machine named myStateMachine
.
\n arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel
\n
If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException
.
The following qualified state machine ARN refers to an alias named PROD
.
\n arn:
\n
If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias.
\nThe following unqualified state machine ARN refers to a state machine named myStateMachine
.
\n arn:
\n
After you update your state machine, you can set the publish
parameter to\n true
in the same action to publish a new version. This\n way, you can opt-in to strict versioning of your state machine.
Step Functions assigns monotonically increasing integers for state machine versions, starting at version number 1.
\nAll StartExecution
calls within a few seconds use the updated\n definition
and roleArn
. Executions started immediately after you\n call UpdateStateMachine
may use the previous state machine\n definition
and roleArn
.
An optional description of the state machine version to publish.
\nYou can only specify the versionDescription
parameter if you've set publish
to true
.
Settings to configure server-side encryption.
" + } } }, "traits": { diff --git a/models/ssm-quicksetup.json b/models/ssm-quicksetup.json new file mode 100644 index 0000000000..be928d7205 --- /dev/null +++ b/models/ssm-quicksetup.json @@ -0,0 +1,2223 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.ssmquicksetup#AccessDeniedException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "The requester has insufficient permissions to perform the operation.
", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinition": { + "type": "structure", + "members": { + "Type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The type of the Quick Setup configuration.
", + "smithy.api#pattern": "^[a-zA-Z0-9_\\-.:/]{3,200}$", + "smithy.api#required": {} + } + }, + "Parameters": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationParametersMap", + "traits": { + "smithy.api#documentation": "A list of key-value pairs containing the required parameters for the configuration\n type.
", + "smithy.api#required": {} + } + }, + "TypeVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The version of the Quick Setup type used.
", + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "LocalDeploymentExecutionRoleName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the IAM role used to deploy local\n configurations.
", + "smithy.api#pattern": "^[\\w+=,.@-]{1,64}$" + } + }, + "LocalDeploymentAdministrationRoleArn": { + "target": "com.amazonaws.ssmquicksetup#IAMRoleArn", + "traits": { + "smithy.api#documentation": "The ARN of the IAM role used to administrate local configuration\n deployments.
" + } + }, + "Id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ID of the configuration definition.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The definition of a Quick Setup configuration.
" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinitionInput": { + "type": "structure", + "members": { + "Type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The type of the Quick Setup configuration.
", + "smithy.api#pattern": "^[a-zA-Z0-9_\\-.:/]{3,200}$", + "smithy.api#required": {} + } + }, + "Parameters": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationParametersMap", + "traits": { + "smithy.api#documentation": "The parameters for the configuration definition type. Parameters for configuration\n definitions vary based the configuration type. The following tables outline the\n parameters for each configuration type.
\n\n DelegatedAccountId
\n
Description: (Required) The ID of the\n delegated administrator account.
\n\n TargetOrganizationalUnits
\n
Description: (Required) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.
\n\n TargetRegions
\n
Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.
\n\n TargetTagKey
\n
Description: (Required) The tag key assigned\n to the instances you want to target.
\n\n TargetTagValue
\n
Description: (Required) The value of the tag\n key assigned to the instances you want to\n target.
\n\n ICalendarString
\n
Description: (Required) An iCalendar\n formatted string containing the schedule you want\n Change Manager to use.
\n\n TargetAccounts
\n
Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts
or\n TargetOrganizationalUnits
.
\n TargetOrganizationalUnits
\n
Description: (Optional) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.
\n\n TargetRegions
\n
Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.
\n\n UpdateSSMAgent
\n
Description: (Optional) A boolean value that\n determines whether the SSM Agent is updated on the\n target instances every 2 weeks. The default value\n is \"true
\".
\n TargetOrganizationalUnits
\n
Description: (Required) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.
\n\n TargetRegions
\n
Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.
\n\n SelectedAggregatorRegion
\n
Description: (Required) The Amazon Web Services Region where you want to create the\n aggregator index.
\n\n ReplaceExistingAggregator
\n
Description: (Required) A boolean value that\n determines whether to demote an existing\n aggregator if it is in a Region that differs from\n the value you specify for the\n SelectedAggregatorRegion
.
\n TargetOrganizationalUnits
\n
Description: (Required) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.
\n\n TargetRegions
\n
Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.
\n\n DelegatedAccountId
\n
Description: (Required) The ID of the\n delegated administrator account.
\n\n JobFunction
\n
Description: (Required) The name for the\n Change Manager job function.
\n\n PermissionType
\n
Description: (Optional) Specifies whether\n you want to use default administrator permissions\n for the job function role, or provide a custom\n IAM policy. The valid values are\n CustomPermissions
and\n AdminPermissions
. The default value\n for the parameter is\n CustomerPermissions
.
\n CustomPermissions
\n
Description: (Optional) A JSON string\n containing the IAM policy you want\n your job function to use. You must provide a value\n for this parameter if you specify\n CustomPermissions
for the\n PermissionType
parameter.
\n TargetOrganizationalUnits
\n
Description: (Required) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.
\n\n TargetRegions
\n
Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.
\n\n AnalyseAllResources
\n
Description: (Optional) A boolean value that\n determines whether DevOps Guru analyzes all\n CloudFormation stacks in the account. The\n default value is \"false
\".
\n EnableSnsNotifications
\n
Description: (Optional) A boolean value that\n determines whether DevOps Guru sends\n notifications when an insight is created. The\n default value is \"true
\".
\n EnableSsmOpsItems
\n
Description: (Optional) A boolean value that\n determines whether DevOps Guru creates an\n OpsCenter OpsItem when an insight is created. The\n default value is \"true
\".
\n EnableDriftRemediation
\n
Description: (Optional) A boolean value that\n determines whether a drift remediation schedule is\n used. The default value is\n \"false
\".
\n RemediationSchedule
\n
Description: (Optional) A rate expression\n that defines the schedule for drift remediation.\n The valid values are rate(30 days)
,\n rate(14 days)
, rate(1\n days)
, and none
. The default\n value is \"none
\".
\n TargetAccounts
\n
Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts
or\n TargetOrganizationalUnits
.
\n TargetOrganizationalUnits
\n
Description: (Optional) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.
\n\n TargetRegions
\n
Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.
\n\n DelegatedAccountId
\n
Description: (Optional) The ID of the\n delegated administrator account. This parameter is\n required for Organization deployments.
\n\n RemediationSchedule
\n
Description: (Optional) A rate expression\n that defines the schedule for drift remediation.\n The valid values are rate(30 days)
,\n rate(14 days)
, rate(2\n days)
, and none
. The default\n value is \"none
\".
\n CPackNames
\n
Description: (Required) A comma separated\n list of Config conformance\n packs.
\n\n TargetAccounts
\n
Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts
or\n TargetOrganizationalUnits
.
\n TargetOrganizationalUnits
\n
Description: (Optional) The ID of the root\n of your Organization. This configuration type\n doesn't currently support choosing specific OUs.\n The configuration will be deployed to all the OUs\n in the Organization.
\n\n TargetRegions
\n
Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.
\n\n RecordAllResources
\n
Description: (Optional) A boolean value that\n determines whether all supported resources are\n recorded. The default value is\n \"true
\".
\n ResourceTypesToRecord
\n
Description: (Optional) A comma separated\n list of resource types you want to record.
\n\n RecordGlobalResourceTypes
\n
Description: (Optional) A boolean value that\n determines whether global resources are recorded\n with all resource configurations. The default\n value is \"false
\".
\n GlobalResourceTypesRegion
\n
Description: (Optional) Determines the\n Amazon Web Services Region where global resources\n are recorded.
\n\n UseCustomBucket
\n
Description: (Optional) A boolean value that\n determines whether a custom Amazon S3\n bucket is used for delivery. The default value is\n \"false
\".
\n DeliveryBucketName
\n
Description: (Optional) The name of the\n Amazon S3 bucket you want Config to deliver configuration snapshots and\n configuration history files to.
\n\n DeliveryBucketPrefix
\n
Description: (Optional) The key prefix you\n want to use in the custom Amazon S3\n bucket.
\n\n NotificationOptions
\n
Description: (Optional) Determines the\n notification configuration for the recorder. The\n valid values are NoStreaming
,\n UseExistingTopic
, and\n CreateTopic
. The default value is\n NoStreaming
.
\n CustomDeliveryTopicAccountId
\n
Description: (Optional) The ID of the\n Amazon Web Services account where the Amazon SNS topic you want to use for notifications\n resides. You must specify a value for this\n parameter if you use the\n UseExistingTopic
notification\n option.
\n CustomDeliveryTopicName
\n
Description: (Optional) The name of the\n Amazon SNS topic you want to use for\n notifications. You must specify a value for this\n parameter if you use the\n UseExistingTopic
notification\n option.
\n RemediationSchedule
\n
Description: (Optional) A rate expression\n that defines the schedule for drift remediation.\n The valid values are rate(30 days)
,\n rate(7 days)
, rate(1\n days)
, and none
. The default\n value is \"none
\".
\n TargetAccounts
\n
Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts
or\n TargetOrganizationalUnits
.
\n TargetOrganizationalUnits
\n
Description: (Optional) The ID of the root\n of your Organization. This configuration type\n doesn't currently support choosing specific OUs.\n The configuration will be deployed to all the OUs\n in the Organization.
\n\n TargetRegions
\n
Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.
\n\n UpdateSSMAgent
\n
Description: (Optional) A boolean value that\n determines whether the SSM Agent is updated on the\n target instances every 2 weeks. The default value\n is \"true
\".
\n UpdateEc2LaunchAgent
\n
Description: (Optional) A boolean value that\n determines whether the EC2 Launch agent is updated\n on the target instances every month. The default\n value is \"false
\".
\n CollectInventory
\n
Description: (Optional) A boolean value that\n determines whether the EC2 Launch agent is updated\n on the target instances every month. The default\n value is \"true
\".
\n ScanInstances
\n
Description: (Optional) A boolean value that\n determines whether the target instances are\n scanned daily for available patches. The default\n value is \"true
\".
\n InstallCloudWatchAgent
\n
Description: (Optional) A boolean value that\n determines whether the Amazon CloudWatch agent\n is installed on the target instances. The default\n value is \"false
\".
\n UpdateCloudWatchAgent
\n
Description: (Optional) A boolean value that\n determines whether the Amazon CloudWatch agent\n is updated on the target instances every month.\n The default value is \"false
\".
\n IsPolicyAttachAllowed
\n
Description: (Optional) A boolean value that\n determines whether Quick Setup attaches policies\n to instance profiles already associated with the\n target instances. The default value is\n \"false
\".
\n TargetType
\n
Description: (Optional) Determines how\n instances are targeted for local account\n deployments. Don't specify a value for this\n parameter if you're deploying to OUs. The valid\n values are *
,\n InstanceIds
,\n ResourceGroups
, and\n Tags
. Use *
to target\n all instances in the account.
\n TargetInstances
\n
Description: (Optional) A comma separated\n list of instance IDs. You must provide a value for\n this parameter if you specify\n InstanceIds
for the\n TargetType
parameter.
\n TargetTagKey
\n
Description: (Optional) The tag key assigned\n to the instances you want to target. You must\n provide a value for this parameter if you specify\n Tags
for the TargetType
\n parameter.
\n TargetTagValue
\n
Description: (Optional) The value of the tag\n key assigned to the instances you want to target.\n You must provide a value for this parameter if you\n specify Tags
for the\n TargetType
parameter.
\n ResourceGroupName
\n
Description: (Optional) The name of the\n resource group associated with the instances you\n want to target. You must provide a value for this\n parameter if you specify\n ResourceGroups
for the\n TargetType
parameter.
\n TargetAccounts
\n
Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts
or\n TargetOrganizationalUnits
.
\n TargetOrganizationalUnits
\n
Description: (Optional) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.
\n\n TargetRegions
\n
Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.
\n\n PackagesToInstall
\n
Description: (Required) A comma separated\n list of packages you want to install on the target\n instances. The valid values are\n AWSEFSTools
, AWSCWAgent
,\n and AWSEC2LaunchAgent
.
\n RemediationSchedule
\n
Description: (Optional) A rate expression\n that defines the schedule for drift remediation.\n The valid values are rate(30 days)
,\n rate(14 days)
, rate(2\n days)
, and none
. The default\n value is \"rate(30 days)
\".
\n IsPolicyAttachAllowed
\n
Description: (Optional) A boolean value that\n determines whether Quick Setup attaches policies\n to instances profiles already associated with the\n target instances. The default value is\n \"false
\".
\n TargetType
\n
Description: (Optional) Determines how\n instances are targeted for local account\n deployments. Don't specify a value for this\n parameter if you're deploying to OUs. The valid\n values are *
,\n InstanceIds
,\n ResourceGroups
, and\n Tags
. Use *
to target\n all instances in the account.
\n TargetInstances
\n
Description: (Optional) A comma separated\n list of instance IDs. You must provide a value for\n this parameter if you specify\n InstanceIds
for the\n TargetType
parameter.
\n TargetTagKey
\n
Description: (Required) The tag key assigned\n to the instances you want to target. You must\n provide a value for this parameter if you specify\n Tags
for the TargetType
\n parameter.
\n TargetTagValue
\n
Description: (Required) The value of the tag\n key assigned to the instances you want to target.\n You must provide a value for this parameter if you\n specify Tags
for the\n TargetType
parameter.
\n ResourceGroupName
\n
Description: (Required) The name of the\n resource group associated with the instances you\n want to target. You must provide a value for this\n parameter if you specify\n ResourceGroups
for the\n TargetType
parameter.
\n TargetAccounts
\n
Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts
or\n TargetOrganizationalUnits
.
\n TargetOrganizationalUnits
\n
Description: (Optional) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.
\n\n TargetRegions
\n
Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.
\n\n PatchPolicyName
\n
Description: (Required) A name for the patch\n policy. The value you provide is applied to target\n Amazon EC2 instances as a tag.
\n\n SelectedPatchBaselines
\n
Description: (Required) An array of JSON\n objects containing the information for the patch\n baselines to include in your patch policy.
\n\n PatchBaselineUseDefault
\n
Description: (Optional) A boolean value that\n determines whether the selected patch baselines\n are all Amazon Web Services provided.
\n\n ConfigurationOptionsPatchOperation
\n
Description: (Optional) Determines whether\n target instances scan for available patches, or\n scan and install available patches. The valid\n values are Scan
and\n ScanAndInstall
. The default value for\n the parameter is Scan
.
\n ConfigurationOptionsScanValue
\n
Description: (Optional) A cron expression\n that is used as the schedule for when instances\n scan for available patches.
\n\n ConfigurationOptionsInstallValue
\n
Description: (Optional) A cron expression\n that is used as the schedule for when instances\n install available patches.
\n\n ConfigurationOptionsScanNextInterval
\n
Description: (Optional) A boolean value that\n determines whether instances should scan for\n available patches at the next cron interval. The\n default value is \"false
\".
\n ConfigurationOptionsInstallNextInterval
\n
Description: (Optional) A boolean value that\n determines whether instances should install\n available patches at the next cron interval. The\n default value is \"false
\".
\n RebootOption
\n
Description: (Optional) A boolean value that\n determines whether instances are rebooted after\n patches are installed. The default value is\n \"false
\".
\n IsPolicyAttachAllowed
\n
Description: (Optional) A boolean value that\n determines whether Quick Setup attaches policies\n to instances profiles already associated with the\n target instances. The default value is\n \"false
\".
\n OutputLogEnableS3
\n
Description: (Optional) A boolean value that\n determines whether command output logs are sent to\n Amazon S3.
\n\n OutputS3Location
\n
Description: (Optional) A JSON string\n containing information about the Amazon S3\n bucket where you want to store the output details\n of the request.
\n\n OutputS3BucketRegion
\n
Description: (Optional) The Amazon Web Services Region where the Amazon S3\n bucket you want Config to deliver\n command output to is located.
\n\n OutputS3BucketName
\n
Description: (Optional) The name of the\n Amazon S3 bucket you want Config to deliver command output to.
\n\n OutputS3KeyPrefix
\n
Description: (Optional) The key prefix you\n want to use in the custom Amazon S3\n bucket.
\n\n TargetType
\n
Description: (Optional) Determines how\n instances are targeted for local account\n deployments. Don't specify a value for this\n parameter if you're deploying to OUs. The valid\n values are *
,\n InstanceIds
,\n ResourceGroups
, and\n Tags
. Use *
to target\n all instances in the account.
\n TargetInstances
\n
Description: (Optional) A comma separated\n list of instance IDs. You must provide a value for\n this parameter if you specify\n InstanceIds
for the\n TargetType
parameter.
\n TargetTagKey
\n
Description: (Required) The tag key assigned\n to the instances you want to target. You must\n provide a value for this parameter if you specify\n Tags
for the TargetType
\n parameter.
\n TargetTagValue
\n
Description: (Required) The value of the tag\n key assigned to the instances you want to target.\n You must provide a value for this parameter if you\n specify Tags
for the\n TargetType
parameter.
\n ResourceGroupName
\n
Description: (Required) The name of the\n resource group associated with the instances you\n want to target. You must provide a value for this\n parameter if you specify\n ResourceGroups
for the\n TargetType
parameter.
\n TargetAccounts
\n
Description: (Optional) The ID of the\n Amazon Web Services account initiating the\n configuration deployment. You only need to provide\n a value for this parameter if you want to deploy\n the configuration locally. A value must be\n provided for either TargetAccounts
or\n TargetOrganizationalUnits
.
\n TargetOrganizationalUnits
\n
Description: (Optional) A comma separated\n list of organizational units (OUs) you want to\n deploy the configuration to.
\n\n TargetRegions
\n
Description: (Required) A comma separated\n list of Amazon Web Services Regions you want to\n deploy the configuration to.
\nThe version of the Quick Setup type to use.
", + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "LocalDeploymentExecutionRoleName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the IAM role used to deploy local\n configurations.
", + "smithy.api#pattern": "^[\\w+=,.@-]{1,64}$" + } + }, + "LocalDeploymentAdministrationRoleArn": { + "target": "com.amazonaws.ssmquicksetup#IAMRoleArn", + "traits": { + "smithy.api#documentation": "The ARN of the IAM role used to administrate local configuration\n deployments.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Defines the preferences and options for a configuration definition.
" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinitionSummariesList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinitionSummary" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinitionSummary": { + "type": "structure", + "members": { + "Id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ID of the configuration definition.
" + } + }, + "Type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The type of the Quick Setup configuration used by the configuration\n definition.
" + } + }, + "TypeVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The version of the Quick Setup type used by the configuration definition.
" + } + }, + "FirstClassParameters": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationParametersMap", + "traits": { + "smithy.api#documentation": "The common parameters and values for the configuration definition.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A summarized definition of a Quick Setup configuration definition.
" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinitionsInputList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinitionInput" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationDefinitionsList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinition" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationManagerList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationManagerSummary" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationManagerSummary": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ARN of the Quick Setup configuration.
", + "smithy.api#required": {} + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The description of the configuration.
" + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the configuration
" + } + }, + "StatusSummaries": { + "target": "com.amazonaws.ssmquicksetup#StatusSummariesList", + "traits": { + "smithy.api#documentation": "Summaries of the state of the configuration manager. These summaries include an\n aggregate of the statuses from the configuration definition associated with the\n configuration manager. This includes deployment statuses, association statuses,\n drift statuses, health checks, and more.
" + } + }, + "ConfigurationDefinitionSummaries": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinitionSummariesList", + "traits": { + "smithy.api#documentation": "A summary of the Quick Setup configuration definition.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A summary of a Quick Setup configuration manager.
" + } + }, + "com.amazonaws.ssmquicksetup#ConfigurationParametersMap": { + "type": "map", + "key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9+=@_\\/\\s-]+$" + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "max": 40960 + } + } + } + }, + "com.amazonaws.ssmquicksetup#ConflictException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "Another request is being processed. Wait a few minutes and try again.
", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.ssmquicksetup#CreateConfigurationManager": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#CreateConfigurationManagerInput" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#CreateConfigurationManagerOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Creates a Quick Setup configuration manager resource. This object is a collection\n of desired state configurations for multiple configuration definitions and\n summaries describing the deployments of those definitions.
", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/configurationManager" + } + } + }, + "com.amazonaws.ssmquicksetup#CreateConfigurationManagerInput": { + "type": "structure", + "members": { + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "A name for the configuration manager.
", + "smithy.api#pattern": "^[ A-Za-z0-9._-]{0,120}$" + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "A description of the configuration manager.
", + "smithy.api#pattern": "^.{0,512}$" + } + }, + "ConfigurationDefinitions": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinitionsInputList", + "traits": { + "smithy.api#documentation": "The definition of the Quick Setup configuration that the configuration manager\n deploys.
", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.ssmquicksetup#TagsMap", + "traits": { + "smithy.api#documentation": "Key-value pairs of metadata to assign to the configuration manager.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#CreateConfigurationManagerOutput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ARN for the newly created configuration manager.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#DeleteConfigurationManager": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#DeleteConfigurationManagerInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Deletes a configuration manager.
", + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/configurationManager/{ManagerArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#DeleteConfigurationManagerInput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ID of the configuration manager.
", + "smithy.api#httpLabel": {}, + "smithy.api#pattern": "^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#Filter": { + "type": "structure", + "members": { + "Key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The key for the filter.
", + "smithy.api#length": { + "max": 128 + }, + "smithy.api#pattern": "^[A-Za-z0-9+=@_\\/\\s-]*$", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.ssmquicksetup#FilterValues", + "traits": { + "smithy.api#documentation": "The values for the filter keys.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "A key-value pair to filter results.
" + } + }, + "com.amazonaws.ssmquicksetup#FilterValues": { + "type": "list", + "member": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9+=@_\\/\\s-]*$" + } + } + }, + "com.amazonaws.ssmquicksetup#FiltersList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#Filter" + } + }, + "com.amazonaws.ssmquicksetup#GetConfigurationManager": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#GetConfigurationManagerInput" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#GetConfigurationManagerOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Returns a configuration manager.
", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/configurationManager/{ManagerArn}" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "GetConfigurationManagerFailure", + "params": { + "ManagerArn": "arn:aws:ssm-quicksetup:us-east-1:602768233532:configuration-manager/7cac1a1b-64a9-4c9a-97e8-8c68928b8f13" + }, + "expect": { + "failure": { + "errorId": "com.amazonaws.ssmquicksetup#AccessDeniedException" + } + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.ssmquicksetup#GetConfigurationManagerInput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ARN of the configuration manager.
", + "smithy.api#httpLabel": {}, + "smithy.api#length": { + "min": 1 + }, + "smithy.api#pattern": "^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#GetConfigurationManagerOutput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ARN of the configuration manager.
", + "smithy.api#required": {} + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The description of the configuration manager.
" + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the configuration manager.
" + } + }, + "CreatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "The datetime stamp when the configuration manager was created.
", + "smithy.api#timestampFormat": "date-time" + } + }, + "LastModifiedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "The datetime stamp when the configuration manager was last updated.
", + "smithy.api#timestampFormat": "date-time" + } + }, + "StatusSummaries": { + "target": "com.amazonaws.ssmquicksetup#StatusSummariesList", + "traits": { + "smithy.api#documentation": "A summary of the state of the configuration manager. This includes deployment\n statuses, association statuses, drift statuses, health checks, and more.
" + } + }, + "ConfigurationDefinitions": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationDefinitionsList", + "traits": { + "smithy.api#documentation": "The configuration definitions association with the configuration manager.
" + } + }, + "Tags": { + "target": "com.amazonaws.ssmquicksetup#TagsMap", + "traits": { + "smithy.api#documentation": "Key-value pairs of metadata to assign to the configuration manager.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#GetServiceSettings": { + "type": "operation", + "input": { + "target": "smithy.api#Unit" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#GetServiceSettingsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "Returns settings configured for Quick Setup in the requesting Amazon Web Services account and Amazon Web Services Region.
", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/serviceSettings" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.ssmquicksetup#GetServiceSettingsOutput": { + "type": "structure", + "members": { + "ServiceSettings": { + "target": "com.amazonaws.ssmquicksetup#ServiceSettings", + "traits": { + "smithy.api#documentation": "Returns details about the settings for Quick Setup in the requesting Amazon Web Services account and Amazon Web Services Region.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#IAMRoleArn": { + "type": "string", + "traits": { + "aws.api#arnReference": { + "type": "AWS::IAM::Role" + } + } + }, + "com.amazonaws.ssmquicksetup#InternalServerException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "An error occurred on the server side.
", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.ssmquicksetup#ListConfigurationManagers": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#ListConfigurationManagersInput" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#ListConfigurationManagersOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Returns Quick Setup configuration managers.
", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/listConfigurationManagers" + }, + "smithy.api#paginated": { + "items": "ConfigurationManagersList", + "inputToken": "StartingToken", + "outputToken": "NextToken", + "pageSize": "MaxItems" + }, + "smithy.test#smokeTests": [ + { + "id": "ListConfigurationManagersSuccess", + "params": {}, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.ssmquicksetup#ListConfigurationManagersInput": { + "type": "structure", + "members": { + "StartingToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The token to use when requesting a specific set of items from a list.
", + "smithy.api#length": { + "max": 1024 + }, + "smithy.api#pattern": "^[A-Za-z0-9+=@_\\/\\s-]*$" + } + }, + "MaxItems": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "Specifies the maximum number of configuration managers that are returned by the\n request.
", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "Filters": { + "target": "com.amazonaws.ssmquicksetup#FiltersList", + "traits": { + "smithy.api#documentation": "Filters the results returned by the request.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#ListConfigurationManagersOutput": { + "type": "structure", + "members": { + "ConfigurationManagersList": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationManagerList", + "traits": { + "smithy.api#documentation": "The configuration managers returned by the request.
" + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The token to use when requesting the next set of configuration managers. If there\n are no additional operations to return, the string is empty.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#ListQuickSetupTypes": { + "type": "operation", + "input": { + "target": "smithy.api#Unit" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#ListQuickSetupTypesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "Returns the available Quick Setup types.
", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/listQuickSetupTypes" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "ListQuickSetupTypesSuccess", + "params": {}, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.ssmquicksetup#ListQuickSetupTypesOutput": { + "type": "structure", + "members": { + "QuickSetupTypeList": { + "target": "com.amazonaws.ssmquicksetup#QuickSetupTypeList", + "traits": { + "smithy.api#documentation": "An array of Quick Setup types.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.ssmquicksetup#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Returns tags assigned to the resource.
", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/tags/{ResourceArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.ssmquicksetup#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ARN of the resource the tag is assigned to.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.ssmquicksetup#Tags", + "traits": { + "smithy.api#documentation": "Key-value pairs of metadata assigned to the resource.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ssmquicksetup#QuickSetup": { + "type": "service", + "version": "2018-05-10", + "operations": [ + { + "target": "com.amazonaws.ssmquicksetup#CreateConfigurationManager" + }, + { + "target": "com.amazonaws.ssmquicksetup#DeleteConfigurationManager" + }, + { + "target": "com.amazonaws.ssmquicksetup#GetConfigurationManager" + }, + { + "target": "com.amazonaws.ssmquicksetup#GetServiceSettings" + }, + { + "target": "com.amazonaws.ssmquicksetup#ListConfigurationManagers" + }, + { + "target": "com.amazonaws.ssmquicksetup#ListQuickSetupTypes" + }, + { + "target": "com.amazonaws.ssmquicksetup#ListTagsForResource" + }, + { + "target": "com.amazonaws.ssmquicksetup#TagResource" + }, + { + "target": "com.amazonaws.ssmquicksetup#UntagResource" + }, + { + "target": "com.amazonaws.ssmquicksetup#UpdateConfigurationDefinition" + }, + { + "target": "com.amazonaws.ssmquicksetup#UpdateConfigurationManager" + }, + { + "target": "com.amazonaws.ssmquicksetup#UpdateServiceSettings" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "SSM QuickSetup", + "arnNamespace": "ssm-quicksetup", + "cloudTrailEventSource": "ssm-quicksetup.amazonaws.com" + }, + "aws.auth#sigv4": { + "name": "ssm-quicksetup" + }, + "aws.iam#supportedPrincipalTypes": [ + "Root", + "IAMUser", + "IAMRole", + "FederatedUser" + ], + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": [ + "Authorization", + "Content-Type", + "Credentials", + "X-Amz-Date", + "X-Api-Key", + "X-Amz-Security-Token", + "x-amz-content-sha256", + "X-Amz-User-Agent" + ] + }, + "smithy.api#documentation": "Quick Setup helps you quickly configure frequently used services and features with\n recommended best practices. Quick Setup simplifies setting up services, including\n Systems Manager, by automating common or recommended tasks.
", + "smithy.api#title": "AWS Systems Manager QuickSetup", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + 
"conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } 
+ ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with 
FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For 
region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + 
}, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ssm-quicksetup.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": 
"us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.ssmquicksetup#QuickSetupTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#QuickSetupTypeOutput" + } + }, + "com.amazonaws.ssmquicksetup#QuickSetupTypeOutput": { + "type": "structure", + "members": { + "Type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The type of the Quick Setup configuration.
" + } + }, + "LatestVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The latest version number of the configuration.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about the Quick Setup type.
" + } + }, + "com.amazonaws.ssmquicksetup#ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "The resource couldn't be found. Check the ID or name and try again.
", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.ssmquicksetup#ServiceSettings": { + "type": "structure", + "members": { + "ExplorerEnablingRoleArn": { + "target": "com.amazonaws.ssmquicksetup#IAMRoleArn", + "traits": { + "smithy.api#documentation": "The IAM role used to enable Explorer.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Settings configured for Quick Setup.
" + } + }, + "com.amazonaws.ssmquicksetup#Status": { + "type": "enum", + "members": { + "INITIALIZING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INITIALIZING" + } + }, + "DEPLOYING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEPLOYING" + } + }, + "SUCCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETING" + } + }, + "STOPPING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPING" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "STOPPED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPED" + } + }, + "DELETE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETE_FAILED" + } + }, + "STOP_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOP_FAILED" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, + "com.amazonaws.ssmquicksetup#StatusDetails": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.ssmquicksetup#StatusSummariesList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#StatusSummary" + } + }, + "com.amazonaws.ssmquicksetup#StatusSummary": { + "type": "structure", + "members": { + "StatusType": { + "target": "com.amazonaws.ssmquicksetup#StatusType", + "traits": { + "smithy.api#documentation": "The type of a status summary.
", + "smithy.api#required": {} + } + }, + "Status": { + "target": "com.amazonaws.ssmquicksetup#Status", + "traits": { + "smithy.api#documentation": "The current status.
" + } + }, + "StatusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "When applicable, returns an informational message relevant to the current status and status type of the status summary object. We don't recommend implementing parsing logic around this value since the messages returned can vary in format.
" + } + }, + "LastUpdatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "The datetime stamp when the status was last updated.
", + "smithy.api#required": {}, + "smithy.api#timestampFormat": "date-time" + } + }, + "StatusDetails": { + "target": "com.amazonaws.ssmquicksetup#StatusDetails", + "traits": { + "smithy.api#documentation": "Details about the status.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A summarized description of the status.
" + } + }, + "com.amazonaws.ssmquicksetup#StatusType": { + "type": "enum", + "members": { + "DEPLOYMENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deployment" + } + }, + "ASYNC_EXECUTIONS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AsyncExecutions" + } + } + } + }, + "com.amazonaws.ssmquicksetup#TagEntry": { + "type": "structure", + "members": { + "Key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The key for the tag.
", + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[A-Za-z0-9 _=@:.+-/]+$" + } + }, + "Value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The value for the tag.
", + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9 _=@:.+-/]+$" + } + } + }, + "traits": { + "smithy.api#documentation": "Key-value pairs of metadata.
", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.ssmquicksetup#TagKeys": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.ssmquicksetup#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#TagResourceInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Assigns key-value pairs of metadata to Amazon Web Services resources.
", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/tags/{ResourceArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#TagResourceInput": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ARN of the resource to tag.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.ssmquicksetup#TagsMap", + "traits": { + "smithy.api#documentation": "Key-value pairs of metadata to assign to the resource.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#Tags": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmquicksetup#TagEntry" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.ssmquicksetup#TagsMap": { + "type": "map", + "key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[A-Za-z0-9 _=@:.+-/]+$" + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9 _=@:.+-/]+$" + } + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.ssmquicksetup#ThrottlingException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The request or operation exceeds the maximum allowed request rate per Amazon Web Services account and Amazon Web Services Region.
", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.ssmquicksetup#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#UntagResourceInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Removes tags from the specified resource.
", + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/tags/{ResourceArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#UntagResourceInput": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ARN of the resource to remove tags from.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.ssmquicksetup#TagKeys", + "traits": { + "smithy.api#documentation": "The keys of the tags to remove from the resource.
", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateConfigurationDefinition": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#UpdateConfigurationDefinitionInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Updates a Quick Setup configuration definition.
", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/configurationDefinition/{ManagerArn}/{Id}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateConfigurationDefinitionInput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ARN of the configuration manager associated with the definition to\n update.
", + "smithy.api#httpLabel": {}, + "smithy.api#pattern": "^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$", + "smithy.api#required": {} + } + }, + "Id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ID of the configuration definition you want to update.
", + "smithy.api#httpLabel": {}, + "smithy.api#pattern": "^[a-z0-9-]{1,20}$", + "smithy.api#required": {} + } + }, + "TypeVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The version of the Quick Setup type to use.
", + "smithy.api#pattern": "^\\d{1,3}(\\.\\d{1,3})?$|^LATEST$" + } + }, + "Parameters": { + "target": "com.amazonaws.ssmquicksetup#ConfigurationParametersMap", + "traits": { + "smithy.api#documentation": "The parameters for the configuration definition type.
" + } + }, + "LocalDeploymentExecutionRoleName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the IAM role used to deploy local\n configurations.
", + "smithy.api#pattern": "^[\\w+=,.@-]{1,64}$" + } + }, + "LocalDeploymentAdministrationRoleArn": { + "target": "com.amazonaws.ssmquicksetup#IAMRoleArn", + "traits": { + "smithy.api#documentation": "The ARN of the IAM role used to administrate local configuration\n deployments.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateConfigurationManager": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#UpdateConfigurationManagerInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Updates a Quick Setup configuration manager.
", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/configurationManager/{ManagerArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateConfigurationManagerInput": { + "type": "structure", + "members": { + "ManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ARN of the configuration manager.
", + "smithy.api#httpLabel": {}, + "smithy.api#pattern": "^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$", + "smithy.api#required": {} + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "A name for the configuration manager.
", + "smithy.api#pattern": "^[ A-Za-z0-9._-]{0,120}$" + } + }, + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "A description of the configuration manager.
", + "smithy.api#pattern": "^.{0,512}$" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateServiceSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmquicksetup#UpdateServiceSettingsInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.ssmquicksetup#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ConflictException" + }, + { + "target": "com.amazonaws.ssmquicksetup#InternalServerException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ThrottlingException" + }, + { + "target": "com.amazonaws.ssmquicksetup#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Updates settings configured for Quick Setup.
", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/serviceSettings" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.ssmquicksetup#UpdateServiceSettingsInput": { + "type": "structure", + "members": { + "ExplorerEnablingRoleArn": { + "target": "com.amazonaws.ssmquicksetup#IAMRoleArn", + "traits": { + "smithy.api#documentation": "The IAM role used to enable Explorer.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmquicksetup#ValidationException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "The request is invalid. Verify the values provided for the request parameters are\n accurate.
", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + } + } +} \ No newline at end of file diff --git a/models/ssm.json b/models/ssm.json index 20551d6f77..892e047223 100644 --- a/models/ssm.json +++ b/models/ssm.json @@ -6687,7 +6687,7 @@ "RejectedPatchesAction": { "target": "com.amazonaws.ssm#PatchAction", "traits": { - "smithy.api#documentation": "The action for Patch Manager to take on patches included in the\n RejectedPackages
list.
\n \n ALLOW_AS_DEPENDENCY
\n : A package in the\n Rejected
patches list is installed only if it is a dependency of another package.\n It is considered compliant with the patch baseline, and its status is reported as\n InstalledOther
. This is the default action if no option is specified.
\n BLOCK: Packages in the Rejected\n patches list, and packages that include them as dependencies, aren't installed by\n Patch Manager under any circumstances. If a package was installed before it was added to the\n Rejected patches list, or is installed outside of Patch\n Manager afterward, it's considered noncompliant with the patch baseline and its status is\n reported as InstalledRejected.
\nThe action for Patch Manager to take on patches included in the\n RejectedPackages
list.
\n Linux and macOS: A package in the rejected patches list\n is installed only if it is a dependency of another package. It is considered compliant with\n the patch baseline, and its status is reported as INSTALLED_OTHER
. This is the\n default action if no option is specified.
\n Windows Server: Windows Server doesn't support the\n concept of package dependencies. If a package in the rejected patches list and already\n installed on the node, its status is reported as INSTALLED_OTHER
. Any package not\n already installed on the node is skipped. This is the default action if no option is\n specified.
\n All OSs: Packages in the rejected patches list, and\n packages that include them as dependencies, aren't installed by Patch Manager under any\n circumstances. If a package was installed before it was added to the rejected patches list, or\n is installed outside of Patch Manager afterward, it's considered noncompliant with the patch\n baseline and its status is reported as INSTALLED_REJECTED
.
Provides information about one or more of your managed nodes, including the operating system\n platform, SSM Agent version, association status, and IP address. This operation does not return\n information for nodes that are either Stopped or Terminated.
\nIf you specify one or more node IDs, the operation returns information for those managed\n nodes. If you don't specify node IDs, it returns information for all your managed nodes. If you\n specify a node ID that isn't valid or a node that you don't own, you receive an error.
\nThe IamRole
field returned for this API operation is the Identity and Access Management (IAM) role assigned to on-premises managed nodes. This operation does not\n return the IAM role for EC2 instances.
Provides information about one or more of your managed nodes, including the operating system\n platform, SSM Agent version, association status, and IP address. This operation does not return\n information for nodes that are either Stopped or Terminated.
\nIf you specify one or more node IDs, the operation returns information for those managed\n nodes. If you don't specify node IDs, it returns information for all your managed nodes. If you\n specify a node ID that isn't valid or a node that you don't own, you receive an error.
\nThe IamRole
field returned for this API operation is the role assigned to an\n Amazon EC2 instance configured with a Systems Manager Quick Setup host management configuration or\n the role assigned to an on-premises managed node.
An API operation used by the Systems Manager console to display information about Systems Manager managed nodes.
", + "smithy.api#documentation": "An API operation used by the Systems Manager console to display information about Systems Manager managed\n nodes.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -9138,7 +9138,7 @@ "MaxResults": { "target": "com.amazonaws.ssm#DescribeInstancePropertiesMaxResults", "traits": { - "smithy.api#documentation": "The maximum number of items to return for the call. The call also returns a token that you\n can specify in a subsequent call to get the next set of results.
" + "smithy.api#documentation": "The maximum number of items to return for the call. The call also returns a token that you\n can specify in a subsequent call to get the next set of results.
" } }, "NextToken": { @@ -9164,7 +9164,7 @@ "NextToken": { "target": "com.amazonaws.ssm#NextToken", "traits": { - "smithy.api#documentation": "The token for the next set of properties to return. Use this token to get the next set of\n results.
" + "smithy.api#documentation": "The token for the next set of properties to return. Use this token to get the next set of\n results.
" } } }, @@ -10323,7 +10323,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the properties of available patches organized by product, product family,\n classification, severity, and other properties of available patches. You can use the reported\n properties in the filters you specify in requests for operations such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines.
\nThe following section lists the properties that can be used in filters for each major\n operating system type:
\nValid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| PRIORITY
\n
Valid properties: PRODUCT
| CLASSIFICATION
\n
Valid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| PRIORITY
\n
Valid properties: PRODUCT
| PRODUCT_FAMILY
|\n CLASSIFICATION
| MSRC_SEVERITY
\n
Lists the properties of available patches organized by product, product family,\n classification, severity, and other properties of available patches. You can use the reported\n properties in the filters you specify in requests for operations such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines.
\nThe following section lists the properties that can be used in filters for each major\n operating system type:
\nValid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| PRIORITY
\n
Valid properties: PRODUCT
| CLASSIFICATION
\n
Valid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| CLASSIFICATION
|\n SEVERITY
\n
Valid properties: PRODUCT
| PRIORITY
\n
Valid properties: PRODUCT
| PRODUCT_FAMILY
|\n CLASSIFICATION
| MSRC_SEVERITY
\n
Returns detailed information about command execution for an invocation or plugin.
\n\n GetCommandInvocation
only gives the execution status of a plugin in a document.\n To get the command execution status on a specific managed node, use ListCommandInvocations. To get the command execution status across managed nodes,\n use ListCommands.
Returns detailed information about command execution for an invocation or plugin. The Run\n Command API follows an eventual consistency model, due to the distributed nature of the system\n supporting the API. This means that the result of an API command you run that affects your\n resources might not be immediately visible to all subsequent commands you run. You should keep\n this in mind when you carry out an API command that immediately follows a previous API\n command.
\n\n GetCommandInvocation
only gives the execution status of a plugin in a document.\n To get the command execution status on a specific managed node, use ListCommandInvocations. To get the command execution status across managed nodes,\n use ListCommands.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service \n(Amazon SNS) notifications for maintenance window Run Command tasks.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.
" } }, "TaskType": { @@ -14797,7 +14797,7 @@ "IamRole": { "target": "com.amazonaws.ssm#IamRole", "traits": { - "smithy.api#documentation": "The Identity and Access Management (IAM) role assigned to the on-premises Systems Manager\n managed node. This call doesn't return the IAM role for Amazon Elastic Compute Cloud\n (Amazon EC2) instances. To retrieve the IAM role for an EC2 instance, use\n the Amazon EC2 DescribeInstances
operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference.
The role assigned to an Amazon EC2 instance configured with a Systems Manager\n Quick Setup host management configuration or the role assigned to an on-premises managed\n node.
\n This call doesn't return the IAM role for unmanaged\n Amazon EC2 instances (instances not configured for Systems Manager). To retrieve the\n role for an unmanaged instance, use the Amazon EC2 DescribeInstances
operation. For\n information, see DescribeInstances in the\n Amazon EC2 API Reference or describe-instances in the\n Amazon Web Services CLI Command Reference.
The value of the EC2 Name
tag associated with the node. If a Name
tag hasn't been applied to the node, this value is blank.
The value of the EC2 Name
tag associated with the node. If a Name
\n tag hasn't been applied to the node, this value is blank.
The instance profile attached to the node. If an instance profile isn't attached to the node, this value is blank.
" + "smithy.api#documentation": "The instance profile attached to the node. If an instance profile isn't attached to the\n node, this value is blank.
" } }, "KeyName": { "target": "com.amazonaws.ssm#KeyName", "traits": { - "smithy.api#documentation": "The name of the key pair associated with the node. If a key pair isnt't associated with the node, this value is blank.
" + "smithy.api#documentation": "The name of the key pair associated with the node. If a key pair isn't associated with the\n node, this value is blank.
" } }, "InstanceState": { @@ -15365,13 +15365,13 @@ "Architecture": { "target": "com.amazonaws.ssm#Architecture", "traits": { - "smithy.api#documentation": "The CPU architecture of the node. For example, x86_64.
" + "smithy.api#documentation": "The CPU architecture of the node. For example, x86_64
.
The public IPv4 address assigned to the node. If a public IPv4 address isn't assigned to the node, this value is blank.
" + "smithy.api#documentation": "The public IPv4 address assigned to the node. If a public IPv4 address isn't assigned to the\n node, this value is blank.
" } }, "LaunchTime": { @@ -15419,13 +15419,13 @@ "ActivationId": { "target": "com.amazonaws.ssm#ActivationId", "traits": { - "smithy.api#documentation": "The activation ID created by Systems Manager when the server or virtual machine (VM) was registered
" + "smithy.api#documentation": "The activation ID created by Systems Manager when the server or virtual machine (VM) was\n registered
" } }, "IamRole": { "target": "com.amazonaws.ssm#IamRole", "traits": { - "smithy.api#documentation": "The IAM role used in the hybrid activation to register the node with Systems Manager.
" + "smithy.api#documentation": "The IAM role used in the hybrid activation to register the node with\n Systems Manager.
" } }, "RegistrationDate": { @@ -15503,7 +15503,7 @@ } }, "traits": { - "smithy.api#documentation": "Describes a filter for a specific list of managed nodes. You can filter node information by using tags. You specify tags by using a key-value mapping.
" + "smithy.api#documentation": "Describes a filter for a specific list of managed nodes. You can filter node information by\n using tags. You specify tags by using a key-value mapping.
" } }, "com.amazonaws.ssm#InstancePropertyFilterKey": { @@ -18147,7 +18147,20 @@ "outputToken": "NextToken", "items": "DocumentIdentifiers", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListDocumentsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.ssm#ListDocumentsRequest": { @@ -19503,7 +19516,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service \n(Amazon SNS) notifications for maintenance window Run Command tasks.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.
" } }, "TimeoutSeconds": { @@ -19697,7 +19710,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service \n(Amazon SNS) notifications for maintenance window Run Command tasks.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.
" } }, "MaxConcurrency": { @@ -23249,7 +23262,7 @@ "target": "com.amazonaws.ssm#DefaultBaseline", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Whether this is the default baseline. Amazon Web Services Systems Manager supports creating multiple default patch\n baselines. For example, you can create a default patch baseline for each operating system.
" + "smithy.api#documentation": "Indicates whether this is the default baseline. Amazon Web Services Systems Manager supports creating multiple default\n patch baselines. For example, you can create a default patch baseline for each operating\n system.
" } } }, @@ -23984,13 +23997,13 @@ "target": "com.amazonaws.ssm#ApproveAfterDays", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "The number of days after the release date of each patch matched by the rule that the patch\n is marked as approved in the patch baseline. For example, a value of 7
means that\n patches are approved seven days after they are released. Not supported on Debian Server or Ubuntu\n Server.
The number of days after the release date of each patch matched by the rule that the patch\n is marked as approved in the patch baseline. For example, a value of 7
means that\n patches are approved seven days after they are released.
This parameter is marked as not required, but your request must include a value\n for either ApproveAfterDays
or ApproveUntilDate
.
Not supported for Debian Server or Ubuntu Server.
" } }, "ApproveUntilDate": { "target": "com.amazonaws.ssm#PatchStringDateTime", "traits": { - "smithy.api#documentation": "The cutoff date for auto approval of released patches. Any patches released on or before\n this date are installed automatically. Not supported on Debian Server or Ubuntu Server.
\nEnter dates in the format YYYY-MM-DD
. For example,\n 2021-12-31
.
The cutoff date for auto approval of released patches. Any patches released on or before\n this date are installed automatically.
\nEnter dates in the format YYYY-MM-DD
. For example,\n 2021-12-31
.
This parameter is marked as not required, but your request must include a value\n for either ApproveUntilDate
or ApproveAfterDays
.
Not supported for Debian Server or Ubuntu Server.
" } }, "EnableNonSecurity": { @@ -29779,7 +29792,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service \n(Amazon SNS) notifications for maintenance window Run Command tasks.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.
" } }, "TaskParameters": { @@ -30190,7 +30203,7 @@ "RejectedPatchesAction": { "target": "com.amazonaws.ssm#PatchAction", "traits": { - "smithy.api#documentation": "The action for Patch Manager to take on patches included in the\n RejectedPackages
list.
\n \n ALLOW_AS_DEPENDENCY
\n : A package in the\n Rejected
patches list is installed only if it is a dependency of another package.\n It is considered compliant with the patch baseline, and its status is reported as\n InstalledOther
. This is the default action if no option is specified.
\n BLOCK: Packages in the Rejected\n patches list, and packages that include them as dependencies, aren't installed by\n Patch Manager under any circumstances. If a package was installed before it was added to the\n Rejected patches list, or is installed outside of Patch\n Manager afterward, it's considered noncompliant with the patch baseline and its status is\n reported as InstalledRejected.
\nThe action for Patch Manager to take on patches included in the\n RejectedPackages
list.
\n Linux and macOS: A package in the rejected patches list\n is installed only if it is a dependency of another package. It is considered compliant with\n the patch baseline, and its status is reported as INSTALLED_OTHER
. This is the\n default action if no option is specified.
\n Windows Server: Windows Server doesn't support the\n concept of package dependencies. If a package in the rejected patches list and already\n installed on the node, its status is reported as INSTALLED_OTHER
. Any package not\n already installed on the node is skipped. This is the default action if no option is\n specified.
\n All OSs: Packages in the rejected patches list, and\n packages that include them as dependencies, aren't installed by Patch Manager under any\n circumstances. If a package was installed before it was added to the rejected patches list, or\n is installed outside of Patch Manager afterward, it's considered noncompliant with the patch\n baseline and its status is reported as INSTALLED_REJECTED
.
The Amazon Web Services Support API Reference is intended for programmers who need detailed\n information about the Amazon Web Services Support operations and data types. You can use the API to manage\n your support cases programmatically. The Amazon Web Services Support API uses HTTP methods that return\n results in JSON format.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
You can also use the Amazon Web Services Support API to access features for Trusted Advisor. You can return a list of\n checks and their descriptions, get check results, specify checks to refresh, and get the\n refresh status of checks.
\nYou can manage your support cases with the following Amazon Web Services Support API operations:
\nThe CreateCase, DescribeCases, DescribeAttachment, and ResolveCase operations\n create Amazon Web Services Support cases, retrieve information about cases, and resolve cases.
\nThe DescribeCommunications, AddCommunicationToCase, and AddAttachmentsToSet operations retrieve and add communications and attachments to Amazon Web Services Support\n cases.
\nThe DescribeServices and DescribeSeverityLevels operations return Amazon Web Service names, service codes, service categories, and problem\n severity levels. You use these values when you call the CreateCase operation.
\nYou can also use the Amazon Web Services Support API to call the Trusted Advisor operations. For more\n information, see Trusted Advisor in the\n Amazon Web Services Support User Guide.
\nFor authentication of requests, Amazon Web Services Support uses Signature Version 4 Signing\n Process.
\nFor more information about this service and the endpoints to use, see About the\n Amazon Web Services Support API in the Amazon Web Services Support User Guide.
", + "smithy.api#documentation": "The Amazon Web Services Support API Reference is intended for programmers who need detailed\n information about the Amazon Web Services Support operations and data types. You can use the API to manage\n your support cases programmatically. The Amazon Web Services Support API uses HTTP methods that return\n results in JSON format.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
You can also use the Amazon Web Services Support API to access features for Trusted Advisor. You can return a list of\n checks and their descriptions, get check results, specify checks to refresh, and get the\n refresh status of checks.
\nYou can manage your support cases with the following Amazon Web Services Support API operations:
\nThe CreateCase, DescribeCases, DescribeAttachment, and ResolveCase operations\n create Amazon Web Services Support cases, retrieve information about cases, and resolve cases.
\nThe DescribeCommunications, AddCommunicationToCase, and AddAttachmentsToSet operations retrieve and add communications and attachments to Amazon Web Services Support\n cases.
\nThe DescribeServices and DescribeSeverityLevels operations return Amazon Web Service names, service codes, service categories, and problem\n severity levels. You use these values when you call the CreateCase operation.
\nYou can also use the Amazon Web Services Support API to call the Trusted Advisor operations. For more\n information, see Trusted Advisor in the\n Amazon Web Services Support User Guide.
\nFor authentication of requests, Amazon Web Services Support uses Signature Version 4 Signing\n Process.
\nFor more information about this service and the endpoints to use, see About the\n Amazon Web Services Support API in the Amazon Web Services Support User Guide.
", "smithy.api#title": "AWS Support", "smithy.api#xmlNamespace": { "uri": "http://support.amazonaws.com/doc/2013-04-15/" @@ -1387,7 +1387,7 @@ } }, "traits": { - "smithy.api#documentation": "An attachment to a case communication. The attachment consists of the file name and\n the content of the file.
" + "smithy.api#documentation": "An attachment to a case communication. The attachment consists of the file name and\n the content of the file. Each attachment file size should not exceed 5 MB. File types that are supported include the following: pdf, jpeg,.doc, .log, .text
" } }, "com.amazonaws.support#AttachmentDetails": { @@ -1554,7 +1554,7 @@ "status": { "target": "com.amazonaws.support#Status", "traits": { - "smithy.api#documentation": "The status of the case.
\nValid values:
\n\n opened
\n
\n pending-customer-action
\n
\n reopened
\n
\n resolved
\n
\n unassigned
\n
\n work-in-progress
\n
The status of the case.
\nValid values:
\n\n all-open
\n
\n customer-action-completed
\n
\n opened
\n
\n pending-customer-action
\n
\n reopened
\n
\n resolved
\n
\n unassigned
\n
\n work-in-progress
\n
A JSON-formatted object that contains the metadata for a support case. It is contained\n in the response from a DescribeCases request. CaseDetails contains the following fields:
\n\n caseId - The support case ID requested\n or returned in the call. The case ID is an alphanumeric string formatted as\n shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47.
\n\n categoryCode - The category of problem\n for the support case. Corresponds to the CategoryCode
values\n returned by a call to DescribeServices.
\n displayId - The identifier for the case\n on pages in the Amazon Web Services Support Center.
\n\n language - The language in which Amazon Web Services Support handles the case. Amazon Web Services Support\ncurrently supports Chinese (“zh”), English (\"en\"), Japanese (\"ja\") and Korean (“ko”). You must specify the ISO 639-1\ncode for the language
parameter if you want support in that language.
\n nextToken - A resumption point for\n pagination.
\n\n recentCommunications - One or more Communication objects. Fields of these objects are\n attachments
, body
, caseId
,\n submittedBy
, and timeCreated
.
\n serviceCode - The identifier for the\n Amazon Web Services service that corresponds to the service code defined in the call to DescribeServices.
\n\n severityCode - The severity code\n assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels. The possible values are:\n low
, normal
, high
,\n urgent
, and critical
.
\n status - The status of the case in the\n Amazon Web Services Support Center. Valid values:
\n\n opened
\n
\n pending-customer-action
\n
\n reopened
\n
\n resolved
\n
\n unassigned
\n
\n work-in-progress
\n
\n subject - The subject line of the\n case.
\n\n submittedBy - The email address of the\n account that submitted the case.
\n\n timeCreated - The time the case was\n created, in ISO-8601 format.
\nA JSON-formatted object that contains the metadata for a support case. It is contained\n in the response from a DescribeCases request. CaseDetails contains the following fields:
\n\n caseId - The support case ID requested\n or returned in the call. The case ID is an alphanumeric string formatted as\n shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47.
\n\n categoryCode - The category of problem\n for the support case. Corresponds to the CategoryCode
values\n returned by a call to DescribeServices.
\n displayId - The identifier for the case\n on pages in the Amazon Web Services Support Center.
\n\n language - The language in which Amazon Web Services Support handles the case. Amazon Web Services Support\ncurrently supports Chinese (“zh”), English (\"en\"), Japanese (\"ja\") and Korean (“ko”). You must specify the ISO 639-1\ncode for the language
parameter if you want support in that language.
\n nextToken - A resumption point for\n pagination.
\n\n recentCommunications - One or more Communication objects. Fields of these objects are\n attachments
, body
, caseId
,\n submittedBy
, and timeCreated
.
\n serviceCode - The identifier for the\n Amazon Web Services service that corresponds to the service code defined in the call to DescribeServices.
\n\n severityCode - The severity code\n assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels. The possible values are:\n low
, normal
, high
,\n urgent
, and critical
.
\n status - The status of the case in the\n Amazon Web Services Support Center. Valid values:
\n\n all-open
\n
\n customer-action-completed
\n
\n opened
\n
\n pending-customer-action
\n
\n reopened
\n
\n resolved
\n
\n unassigned
\n
\n work-in-progress
\n
\n subject - The subject line of the\n case.
\n\n submittedBy - The email address of the\n account that submitted the case.
\n\n timeCreated - The time the case was\n created, in ISO-8601 format.
\nCreates a function package.
\nA function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network. For more information, see Function packages in the Amazon Web Services Telco Network Builder User Guide. \n
\nCreating a function package is the first step for creating a network in AWS TNB. This request creates an empty container with an ID. The next step is to upload the actual CSAR zip file into that empty container. To upload function package content, see PutSolFunctionPackageContent.
", + "smithy.api#documentation": "Creates a function package.
\nA function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network. For more information, see Function packages in the\n Amazon Web Services Telco Network Builder User Guide.
\nCreating a function package is the first step for creating a network in AWS TNB. This\n request creates an empty container with an ID. The next step is to upload the actual CSAR\n zip file into that empty container. To upload function package content, see PutSolFunctionPackageContent.
", "smithy.api#examples": [ { "title": "Create a Sol function package", @@ -221,7 +221,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a network instance.
\nA network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed. Creating a network instance is the third step after creating a network package. For more information about network instances, Network instances in the Amazon Web Services Telco Network Builder User Guide.
\nOnce you create a network instance, you can instantiate it. To instantiate a network, see InstantiateSolNetworkInstance.
", + "smithy.api#documentation": "Creates a network instance.
\nA network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed. Creating a network instance is the third step after creating a network\n package. For more information about network instances, Network instances in the\n Amazon Web Services Telco Network Builder User Guide.
\nOnce you create a network instance, you can instantiate it. To instantiate a network,\n see InstantiateSolNetworkInstance.
", "smithy.api#examples": [ { "title": "Create a Sol Network Instance", @@ -361,7 +361,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a network package.
\nA network package is a .zip file in CSAR (Cloud Service Archive) format defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on. For more information, see Network instances in the Amazon Web Services Telco Network Builder User Guide. \n
\nA network package consists of a network service descriptor (NSD) file (required) and any additional files (optional), such as scripts specific to your needs. For example, if you have multiple function packages in your network package, you can use the NSD to define which network functions should run in certain VPCs, subnets, or EKS clusters.
\nThis request creates an empty network package container with an ID. Once you create a network package, you can upload the network package content using PutSolNetworkPackageContent.
", + "smithy.api#documentation": "Creates a network package.
\nA network package is a .zip file in CSAR (Cloud Service Archive) format defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on. For more information, see Network instances in the\n Amazon Web Services Telco Network Builder User Guide.
\nA network package consists of a network service descriptor (NSD) file (required) and any\n additional files (optional), such as scripts specific to your needs. For example, if you\n have multiple function packages in your network package, you can use the NSD to define\n which network functions should run in certain VPCs, subnets, or EKS clusters.
\nThis request creates an empty network package container with an ID. Once you create a\n network package, you can upload the network package content using PutSolNetworkPackageContent.
", "smithy.api#examples": [ { "title": "Create a Sol network package", @@ -478,7 +478,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a function package.
\nA function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network.
\nTo delete a function package, the package must be in a disabled state. To disable a function package, see UpdateSolFunctionPackage.\n
", + "smithy.api#documentation": "Deletes a function package.
\nA function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network.
\nTo delete a function package, the package must be in a disabled state. To disable a\n function package, see UpdateSolFunctionPackage.
", "smithy.api#examples": [ { "title": "Delete a function package", @@ -537,7 +537,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a network instance.
\nA network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.
\nTo delete a network instance, the instance must be in a stopped or terminated state. To terminate a network instance, see TerminateSolNetworkInstance.
", + "smithy.api#documentation": "Deletes a network instance.
\nA network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.
\nTo delete a network instance, the instance must be in a stopped or terminated state. To\n terminate a network instance, see TerminateSolNetworkInstance.
", "smithy.api#examples": [ { "title": "Delete a Sol Network Instance.", @@ -596,7 +596,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes network package.
\nA network package is a .zip file in CSAR (Cloud Service Archive) format defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on.
\nTo delete a network package, the package must be in a disable state. To disable a network package, see UpdateSolNetworkPackage.
", + "smithy.api#documentation": "Deletes network package.
\nA network package is a .zip file in CSAR (Cloud Service Archive) format defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on.
\nTo delete a network package, the package must be in a disable state. To disable a\n network package, see UpdateSolNetworkPackage.
", "smithy.api#examples": [ { "title": "Delete a Sol network package", @@ -717,7 +717,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets the details of a network function instance, including the instantation state and metadata from the function package descriptor in the network function package.
\nA network function instance is a function in a function package .
", + "smithy.api#documentation": "Gets the details of a network function instance, including the instantiation state and\n metadata from the function package descriptor in the network function package.
\nA network function instance is a function in a function package .
", "smithy.api#examples": [ { "title": "Get a Sol Network Function Instance details", @@ -910,7 +910,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets the details of an individual function package, such as the operational state and whether the package is in use.
\nA function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network..
", + "smithy.api#documentation": "Gets the details of an individual function package, such as the operational state and\n whether the package is in use.
\nA function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network..
", "smithy.api#examples": [ { "title": "Describe a function package with correct vnfPkgId", @@ -1098,7 +1098,7 @@ "accept": { "target": "com.amazonaws.tnb#DescriptorContentType", "traits": { - "smithy.api#documentation": "Indicates which content types, expressed as MIME types, the client is able to understand.
", + "smithy.api#documentation": "Indicates which content types, expressed as MIME types, the client is able to\n understand.
", "smithy.api#httpHeader": "Accept", "smithy.api#required": {} } @@ -1217,7 +1217,7 @@ "vnfdId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Function package descriptor ID.
" + "smithy.api#documentation": "Function package descriptor ID.
" } }, "vnfProvider": { @@ -1462,7 +1462,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets the details of a network operation, including the tasks involved in the network operation and the status of the tasks.
\nA network operation is any operation that is done to your network, such as network instance instantiation or termination.
", + "smithy.api#documentation": "Gets the details of a network operation, including the tasks involved in the network\n operation and the status of the tasks.
\nA network operation is any operation that is done to your network, such as network instance instantiation or termination.
", "smithy.api#examples": [ { "title": "Get Sol Network Instantiate operation", @@ -1476,6 +1476,85 @@ "operationState": "COMPLETED", "lcmOperationType": "INSTANTIATE", "metadata": { + "instantiateMetadata": { + "nsdInfoId": "np-0d0f3e2eae4fc1ac1", + "additionalParamsForNs": { + "cidr_block": "10.0.0.0/16", + "availability_zone": "us-west-2a" + } + }, + "createdAt": "2022-06-10T19:48:34Z", + "lastModified": "2022-06-10T21:48:33Z" + }, + "tasks": [ + { + "taskName": "HookExecution", + "taskContext": { + "cloudWatchLogsARN": "arn:aws:logs:us-east-1:123456789000:log-group:/aws/codebuild/TestProject:log-stream:a4dc6b0b-2ea3-48c5-bb30-636c4f376b81" + }, + "taskStatus": "IN_PROGRESS", + "taskStartTime": "2022-06-10T19:48:34Z", + "taskEndTime": "2022-06-10T21:48:33Z" + } + ] + } + }, + { + "title": "Get Sol Network Update operation", + "input": { + "nsLcmOpOccId": "no-0d5b823eb5c2a9241" + }, + "output": { + "id": "no-0d5b823eb5c2a9241", + "arn": "arn:aws:tnb:us-west-2:123456789000:network-operation/no-0d5b823eb5c2a9241", + "nsInstanceId": "ni-0d5b823eb5c2a9241", + "operationState": "COMPLETED", + "lcmOperationType": "UPDATE", + "updateType": "UPDATE_NS", + "metadata": { + "updateNsMetadata": { + "nsdInfoId": "np-0d0f3e2eae4fc1ac1", + "additionalParamsForNs": { + "cidr_block": "10.0.0.0/16", + "availability_zone": "us-west-2a" + } + }, + "createdAt": "2022-06-10T19:48:34Z", + "lastModified": "2022-06-10T21:48:33Z" + }, + "tasks": [ + { + "taskName": "HookExecution", + "taskContext": { + "cloudWatchLogsARN": "arn:aws:logs:us-east-1:123456789000:log-group:/aws/codebuild/TestProject:log-stream:a4dc6b0b-2ea3-48c5-bb30-636c4f376b81" + }, + "taskStatus": "IN_PROGRESS", + "taskStartTime": "2022-06-10T19:48:34Z", + "taskEndTime": "2022-06-10T21:48:33Z" + } + ] + } + }, + { + "title": "Get Sol Network Update operation", + "input": { + "nsLcmOpOccId": "no-0d5b823eb5c2a9241" + }, + "output": { + "id": "no-0d5b823eb5c2a9241", + "arn": 
"arn:aws:tnb:us-west-2:123456789000:network-operation/no-0d5b823eb5c2a9241", + "nsInstanceId": "ni-0d5b823eb5c2a9241", + "operationState": "COMPLETED", + "lcmOperationType": "UPDATE", + "updateType": "MODIFY_VNF_INFORMATION", + "metadata": { + "modifyVnfInfoMetadata": { + "vnfInstanceId": "fi-0d5b823eb5c2a9241", + "vnfConfigurableProperties": { + "pcf.port": "8080", + "pcf.pods": "10" + } + }, "createdAt": "2022-06-10T19:48:34Z", "lastModified": "2022-06-10T21:48:33Z" }, @@ -1561,6 +1640,24 @@ "com.amazonaws.tnb#GetSolNetworkOperationMetadata": { "type": "structure", "members": { + "updateNsMetadata": { + "target": "com.amazonaws.tnb#UpdateNsMetadata", + "traits": { + "smithy.api#documentation": "Metadata related to the network operation occurrence for network instance updates.\n This is populated only if the lcmOperationType is UPDATE
and the\n updateType is UPDATE_NS
.
Metadata related to the network operation occurrence for network function updates in a network instance.\n This is populated only if the lcmOperationType is UPDATE
and the\n updateType is MODIFY_VNF_INFORMATION
.
Metadata related to the network operation occurrence for network instantiation.\n This is populated only if the lcmOperationType is INSTANTIATE
.
Type of the operation represented by this occurrence.
" } }, + "updateType": { + "target": "com.amazonaws.tnb#UpdateSolNetworkType", + "traits": { + "smithy.api#documentation": "Type of the update. Only present if the network operation\n lcmOperationType is UPDATE
.
Metadata related to the onboarded network service descriptor in the network package.
" + "smithy.api#documentation": "Metadata related to the onboarded network service descriptor in the network\n package.
" } }, "createdAt": { @@ -2053,7 +2156,7 @@ "vnfPkgIds": { "target": "com.amazonaws.tnb#VnfPkgIdList", "traits": { - "smithy.api#documentation": "Identifies the function package for the function package descriptor referenced by the onboarded network package.
", + "smithy.api#documentation": "Identifies the function package for the function package descriptor referenced by the\n onboarded network package.
", "smithy.api#required": {} } }, @@ -2140,6 +2243,27 @@ "smithy.api#documentation": "The metadata of a network function.
\nA network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.
" } }, + "com.amazonaws.tnb#InstantiateMetadata": { + "type": "structure", + "members": { + "nsdInfoId": { + "target": "com.amazonaws.tnb#NsdInfoId", + "traits": { + "smithy.api#documentation": "The network service descriptor used for instantiating the network instance.
", + "smithy.api#required": {} + } + }, + "additionalParamsForNs": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "The configurable properties used during instantiation.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Metadata related to the configuration properties used during instantiation of the network instance.
" + } + }, "com.amazonaws.tnb#InstantiateSolNetworkInstance": { "type": "operation", "input": { @@ -2169,7 +2293,7 @@ } ], "traits": { - "smithy.api#documentation": "Instantiates a network instance.
\nA network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.
\nBefore you can instantiate a network instance, you have to create a network instance. For more information, see CreateSolNetworkInstance.
", + "smithy.api#documentation": "Instantiates a network instance.
\nA network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.
\nBefore you can instantiate a network instance, you have to create a network instance.\n For more information, see CreateSolNetworkInstance.
", "smithy.api#examples": [ { "title": "Instantiate a Sol Network Instance", @@ -2234,7 +2358,7 @@ "tags": { "target": "com.amazonaws.tnb#TagMap", "traits": { - "smithy.api#documentation": "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.
" + "smithy.api#documentation": "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.
" } } }, @@ -2255,7 +2379,7 @@ "tags": { "target": "com.amazonaws.tnb#TagMap", "traits": { - "smithy.api#documentation": "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.
" + "smithy.api#documentation": "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.
" } } }, @@ -3052,7 +3176,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists details for a network operation, including when the operation started and the status of the operation.
\nA network operation is any operation that is done to your network, such as network instance instantiation or termination.
", + "smithy.api#documentation": "Lists details for a network operation, including when the operation started and the\n status of the operation.
\nA network operation is any operation that is done to your network, such as network instance instantiation or termination.
", "smithy.api#examples": [ { "title": "List Sol Network Instantiate operations", @@ -3066,6 +3190,7 @@ "operationState": "COMPLETED", "lcmOperationType": "INSTANTIATE", "metadata": { + "nsdInfoId": "np-0d0f3e2eae4fc1ac1", "createdAt": "2022-06-10T19:48:34Z", "lastModified": "2022-06-10T21:48:33Z" } @@ -3092,12 +3217,59 @@ "detail": "An error occurred (InsufficientInstanceCapacity) when calling the RunInstances operation (reached max retries: 4). We currently do not have sufficient capacity in the Availability Zone you requested" }, "metadata": { + "nsdInfoId": "np-0d0f3e2eae4fc1ac1", "createdAt": "2022-06-10T19:48:33Z", "lastModified": "2022-06-10T19:48:33Z" } } ] } + }, + { + "title": "List Sol Network Update operations", + "input": { + "nsInstanceId": "ni-0d5b823eb5c2a9241" + }, + "output": { + "networkOperations": [ + { + "id": "no-0d5b823eb5c2a9241", + "arn": "arn:aws:tnb:us-west-2:123456789000:network-operation/no-0d5b823eb5c2a9241", + "nsInstanceId": "ni-0d5b823eb5c2a9241", + "operationState": "COMPLETED", + "lcmOperationType": "UPDATE", + "updateType": "MODIFY_VNF_INFORMATION", + "metadata": { + "vnfInstanceId": "fi-0d5b823eb5c2a9241", + "createdAt": "2022-06-10T19:48:34Z", + "lastModified": "2022-06-10T21:48:33Z" + } + } + ] + } + }, + { + "title": "List Sol Network Update operations", + "input": { + "nsInstanceId": "ni-0d5b823eb5c2a9241" + }, + "output": { + "networkOperations": [ + { + "id": "no-0d5b823eb5c2a9241", + "arn": "arn:aws:tnb:us-west-2:123456789000:network-operation/no-0d5b823eb5c2a9241", + "nsInstanceId": "ni-0d5b823eb5c2a9241", + "operationState": "COMPLETED", + "lcmOperationType": "UPDATE", + "updateType": "UPDATE_NS", + "metadata": { + "nsdInfoId": "np-0d0f3e2eae4fc1ac1", + "createdAt": "2022-06-10T19:48:34Z", + "lastModified": "2022-06-10T21:48:33Z" + } + } + ] + } } ], "smithy.api#http": { @@ -3151,6 +3323,12 @@ "smithy.api#required": {} } }, + "updateType": { + "target": "com.amazonaws.tnb#UpdateSolNetworkType", + "traits": { + 
"smithy.api#documentation": "Type of the update. Only present if the network operation lcmOperationType is UPDATE
.
Network instance id filter, to retrieve network operations associated to a network instance.
", + "smithy.api#httpQuery": "nsInstanceId" + } + }, "maxResults": { "target": "smithy.api#Integer", "traits": { @@ -3197,6 +3382,18 @@ "com.amazonaws.tnb#ListSolNetworkOperationsMetadata": { "type": "structure", "members": { + "nsdInfoId": { + "target": "com.amazonaws.tnb#NsdInfoId", + "traits": { + "smithy.api#documentation": "The network service descriptor id used for the operation.
\nOnly present if the updateType is UPDATE_NS
.
The network function id used for the operation.
\nOnly present if the updateType is MODIFY_VNF_INFO
.
Identifies the function package for the function package descriptor referenced by the onboarded network package.
" + "smithy.api#documentation": "Identifies the function package for the function package descriptor referenced by the\n onboarded network package.
" } }, "metadata": { @@ -3593,6 +3790,28 @@ "smithy.api#output": {} } }, + "com.amazonaws.tnb#ModifyVnfInfoMetadata": { + "type": "structure", + "members": { + "vnfInstanceId": { + "target": "com.amazonaws.tnb#VnfInstanceId", + "traits": { + "smithy.api#documentation": "The network function instance that was updated in the network instance.
", + "smithy.api#required": {} + } + }, + "vnfConfigurableProperties": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "The configurable properties used during update of the network function instance.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Metadata related to the configuration properties used during update of a specific\n network function in a network instance.
" + } + }, "com.amazonaws.tnb#NetworkArtifactMeta": { "type": "structure", "members": { @@ -3681,12 +3900,24 @@ "smithy.api#enumValue": "NOT_INSTANTIATED" } }, + "UPDATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATED" + } + }, "IMPAIRED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "IMPAIRED" } }, + "UPDATE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATE_FAILED" + } + }, "STOPPED": { "target": "smithy.api#Unit", "traits": { @@ -3705,6 +3936,12 @@ "smithy.api#enumValue": "INSTANTIATE_IN_PROGRESS" } }, + "INTENT_TO_UPDATE_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTENT_TO_UPDATE_IN_PROGRESS" + } + }, "UPDATE_IN_PROGRESS": { "target": "smithy.api#Unit", "traits": { @@ -3951,7 +4188,7 @@ } }, "file": { - "target": "smithy.api#Blob", + "target": "com.amazonaws.tnb#SensitiveBlob", "traits": { "smithy.api#documentation": "Function package file.
", "smithy.api#httpPayload": {}, @@ -4112,7 +4349,7 @@ } }, "file": { - "target": "smithy.api#Blob", + "target": "com.amazonaws.tnb#SensitiveBlob", "traits": { "smithy.api#documentation": "Network package file.
", "smithy.api#httpPayload": {}, @@ -4208,6 +4445,12 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.tnb#SensitiveBlob": { + "type": "blob", + "traits": { + "smithy.api#sensitive": {} + } + }, "com.amazonaws.tnb#ServiceQuotaExceededException": { "type": "structure", "members": { @@ -4363,7 +4606,7 @@ ], "maxAge": 86400 }, - "smithy.api#documentation": "Amazon Web Services Telco Network Builder (TNB) is a network automation service that helps you deploy and manage telecom networks. AWS TNB helps you with the lifecycle management of your telecommunication network functions throughout planning, deployment, and post-deployment activities.
", + "smithy.api#documentation": "\n Amazon Web Services Telco Network Builder (TNB) is a network automation service that helps\n you deploy and manage telecom networks. AWS TNB helps you with the lifecycle management of\n your telecommunication network functions throughout planning, deployment, and\n post-deployment activities.
", "smithy.api#title": "AWS Telco Network Builder", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -4407,7 +4650,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4426,7 +4668,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -4454,13 +4695,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -4473,7 +4715,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4487,7 +4728,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4510,7 +4750,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4545,11 +4784,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4560,16 +4797,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -4583,14 +4823,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -4599,15 +4837,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4618,16 +4855,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -4641,7 +4881,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4661,11 +4900,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4676,20 +4913,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { 
"conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4700,18 +4939,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] }, @@ -5259,7 +5502,7 @@ "tags": { "target": "com.amazonaws.tnb#TagMap", "traits": { - "smithy.api#documentation": "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.
" + "smithy.api#documentation": "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.
" } } }, @@ -5279,7 +5522,7 @@ "tags": { "target": "com.amazonaws.tnb#TagMap", "traits": { - "smithy.api#documentation": "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.
" + "smithy.api#documentation": "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.
" } } }, @@ -5388,6 +5631,27 @@ "smithy.api#output": {} } }, + "com.amazonaws.tnb#UpdateNsMetadata": { + "type": "structure", + "members": { + "nsdInfoId": { + "target": "com.amazonaws.tnb#NsdInfoId", + "traits": { + "smithy.api#documentation": "The network service descriptor used for updating the network instance.
", + "smithy.api#required": {} + } + }, + "additionalParamsForNs": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "The configurable properties used during update.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Metadata related to the configuration properties used during update of a network instance.
" + } + }, "com.amazonaws.tnb#UpdateSolFunctionPackage": { "type": "operation", "input": { @@ -5510,7 +5774,7 @@ } ], "traits": { - "smithy.api#documentation": "Update a network instance.
\nA network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.
", + "smithy.api#documentation": "Update a network instance.
\nA network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.
\nChoose the updateType parameter to target the necessary update of the network instance.
", "smithy.api#examples": [ { "title": "Update a Sol Network Instance", @@ -5534,6 +5798,29 @@ "Name": "Resource" } } + }, + { + "title": "Update a Sol Network Instance", + "input": { + "nsInstanceId": "ni-0d5b823eb5c2a9241", + "updateType": "UPDATE_NS", + "updateNs": { + "nsdInfoId": "np-0d5b823eb5c2a9241", + "additionalParamsForNs": { + "cidr_block": "10.0.0.0/16", + "availability_zone": "us-west-2a" + } + }, + "tags": { + "Name": "Resource" + } + }, + "output": { + "nsLcmOpOccId": "no-0d5b823eb5c2a9241", + "tags": { + "Name": "Resource" + } + } } ], "smithy.api#http": { @@ -5557,20 +5844,26 @@ "updateType": { "target": "com.amazonaws.tnb#UpdateSolNetworkType", "traits": { - "smithy.api#documentation": "The type of update.
", + "smithy.api#documentation": "The type of update.
\nUse the MODIFY_VNF_INFORMATION
update type, to update a specific network function\n configuration, in the network instance.
Use the UPDATE_NS
update type, to update the network instance to a\n new network service descriptor.
Identifies the network function information parameters and/or the configurable properties of the network function to be modified.
" + "smithy.api#documentation": "Identifies the network function information parameters and/or the configurable\n properties of the network function to be modified.
\nInclude this property only if the update type is MODIFY_VNF_INFORMATION
.
Identifies the network service descriptor and the configurable\n properties of the descriptor, to be used for the update.
\nInclude this property only if the update type is UPDATE_NS
.
A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.
" + "smithy.api#documentation": "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.
" } } }, @@ -5590,7 +5883,7 @@ "tags": { "target": "com.amazonaws.tnb#TagMap", "traits": { - "smithy.api#documentation": "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.
" + "smithy.api#documentation": "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.
" } } }, @@ -5611,7 +5904,7 @@ "vnfConfigurableProperties": { "target": "smithy.api#Document", "traits": { - "smithy.api#documentation": "Provides values for the configurable properties declared in the function package descriptor.
", + "smithy.api#documentation": "Provides values for the configurable properties declared in the function package\n descriptor.
", "smithy.api#required": {} } } @@ -5713,6 +6006,27 @@ "smithy.api#output": {} } }, + "com.amazonaws.tnb#UpdateSolNetworkServiceData": { + "type": "structure", + "members": { + "nsdInfoId": { + "target": "com.amazonaws.tnb#NsdInfoId", + "traits": { + "smithy.api#documentation": "ID of the network service descriptor.
", + "smithy.api#required": {} + } + }, + "additionalParamsForNs": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "Values for the configurable properties declared in the network service descriptor.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information parameters and/or the configurable properties for a network descriptor used for update.
" + } + }, "com.amazonaws.tnb#UpdateSolNetworkType": { "type": "enum", "members": { @@ -5721,6 +6035,12 @@ "traits": { "smithy.api#enumValue": "MODIFY_VNF_INFORMATION" } + }, + "UPDATE_NS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATE_NS" + } } } }, @@ -5767,7 +6087,7 @@ } ], "traits": { - "smithy.api#documentation": "Validates function package content. This can be used as a dry run before uploading function package content with PutSolFunctionPackageContent.
\nA function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network.
", + "smithy.api#documentation": "Validates function package content. This can be used as a dry run before uploading\n function package content with PutSolFunctionPackageContent.
\nA function package is a .zip file in CSAR (Cloud Service Archive) format that contains a network function (an ETSI standard telecommunication application) and function package descriptor that uses the TOSCA standard to describe how the network functions should run on your network.
", "smithy.api#examples": [ { "title": "Validate a Function Package content", @@ -5817,7 +6137,7 @@ } }, "file": { - "target": "smithy.api#Blob", + "target": "com.amazonaws.tnb#SensitiveBlob", "traits": { "smithy.api#documentation": "Function package file.
", "smithy.api#httpPayload": {}, @@ -5916,7 +6236,7 @@ } ], "traits": { - "smithy.api#documentation": "Validates network package content. This can be used as a dry run before uploading network package content with PutSolNetworkPackageContent.
\nA network package is a .zip file in CSAR (Cloud Service Archive) format defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on.
", + "smithy.api#documentation": "Validates network package content. This can be used as a dry run before uploading\n network package content with PutSolNetworkPackageContent.
\nA network package is a .zip file in CSAR (Cloud Service Archive) format defines the function packages you want to deploy and the Amazon Web Services infrastructure you want to deploy them on.
", "smithy.api#examples": [ { "title": "Validate the network package content of a NSD archive", @@ -5978,7 +6298,7 @@ } }, "file": { - "target": "smithy.api#Blob", + "target": "com.amazonaws.tnb#SensitiveBlob", "traits": { "smithy.api#documentation": "Network package file.
", "smithy.api#httpPayload": {}, @@ -6069,7 +6389,7 @@ } }, "traits": { - "smithy.api#documentation": "Unable to process the request because the client provided input failed to satisfy request constraints.
", + "smithy.api#documentation": "Unable to process the request because the client provided input failed to satisfy\n request constraints.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } diff --git a/models/waf-regional.json b/models/waf-regional.json index 7e1900e027..dc6128bc42 100644 --- a/models/waf-regional.json +++ b/models/waf-regional.json @@ -7937,6 +7937,21 @@ ] } } + ], + "smithy.test#smokeTests": [ + { + "id": "ListRulesSuccess", + "params": { + "Limit": 20 + }, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } ] } }, diff --git a/models/workspaces.json b/models/workspaces.json index 9626704585..bf79144a7d 100644 --- a/models/workspaces.json +++ b/models/workspaces.json @@ -1076,7 +1076,7 @@ "DesiredUserSessions": { "target": "com.amazonaws.workspaces#DesiredUserSessions", "traits": { - "smithy.api#documentation": "The desired number of user sessions for a multi-session pool. \n This is not allowed for single-session pools.
", + "smithy.api#documentation": "The desired number of user sessions for the WorkSpaces in the pool.
", "smithy.api#required": {} } } @@ -1091,7 +1091,7 @@ "AvailableUserSessions": { "target": "com.amazonaws.workspaces#AvailableUserSessions", "traits": { - "smithy.api#documentation": "The number of user sessions currently being used for pool sessions. This only applies to multi-session pools.
", + "smithy.api#documentation": "The number of user sessions currently available for streaming from your pool.
\nAvailableUserSessions = ActualUserSessions - ActiveUserSessions
", "smithy.api#required": {} } }, @@ -1105,14 +1105,14 @@ "ActualUserSessions": { "target": "com.amazonaws.workspaces#ActualUserSessions", "traits": { - "smithy.api#documentation": "The total number of session slots that are available for a pool of WorkSpaces.
", + "smithy.api#documentation": "The total number of user sessions that are available for streaming or are currently \n streaming in your pool.
\nActualUserSessions = AvailableUserSessions + ActiveUserSessions
", "smithy.api#required": {} } }, "ActiveUserSessions": { "target": "com.amazonaws.workspaces#ActiveUserSessions", "traits": { - "smithy.api#documentation": "The number of user sessions currently being used for pool sessions. This only applies to multi-session pools.
", + "smithy.api#documentation": "The number of user sessions currently being used for your pool.
", "smithy.api#required": {} } } @@ -2478,7 +2478,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates one or more WorkSpaces.
\nThis operation is asynchronous and returns before the WorkSpaces are created.
\nThe MANUAL
running mode value is only supported by Amazon WorkSpaces\n Core. Contact your account team to be allow-listed to use this value. For more\n information, see Amazon WorkSpaces\n Core.
You don't need to specify the PCOIP
protocol for Linux bundles\n because WSP
is the default protocol for those bundles.
User-decoupled WorkSpaces are only supported by Amazon WorkSpaces\n Core.
\nCreates one or more WorkSpaces.
\nThis operation is asynchronous and returns before the WorkSpaces are created.
\nThe MANUAL
running mode value is only supported by Amazon WorkSpaces\n Core. Contact your account team to be allow-listed to use this value. For more\n information, see Amazon WorkSpaces\n Core.
You don't need to specify the PCOIP
protocol for Linux bundles\n because WSP
is the default protocol for those bundles.
User-decoupled WorkSpaces are only supported by Amazon WorkSpaces\n Core.
\nReview your running mode to ensure you are using one that is optimal for your needs and budget.\n For more information on switching running modes, see \n \n Can I switch between hourly and monthly billing?\n
\nIf specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11\n BYOL images. For more information about subscribing to Office for BYOL images, see Bring\n Your Own Windows Desktop Licenses.
\nAlthough this parameter is an array, only one item is allowed at this time.
\nWindows 11 only supports Microsoft_Office_2019
.
If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11\n BYOL images. For more information about subscribing to Office for BYOL images, see Bring\n Your Own Windows Desktop Licenses.
\nAlthough this parameter is an array, only one item is allowed at this\n time.
\nDuring the image import process, non-GPU WSP WorkSpaces with Windows 11 support\n only Microsoft_Office_2019
. GPU WSP WorkSpaces with Windows 11 do not\n support Office installation.
The running mode. For more information, see Manage the WorkSpace Running\n Mode.
\nThe MANUAL
value is only supported by Amazon WorkSpaces Core. Contact\n your account team to be allow-listed to use this value. For more information, see\n Amazon WorkSpaces Core.
The running mode. For more information, see Manage the WorkSpace Running\n Mode.
\nThe MANUAL
value is only supported by Amazon WorkSpaces Core. Contact\n your account team to be allow-listed to use this value. For more information, see\n Amazon WorkSpaces Core.
Review your running mode to ensure you are using one that is optimal for your needs and\n budget. For more information on switching running modes, see Can I switch between hourly and monthly billing?\n
" } }, "RunningModeAutoStopTimeoutInMinutes": {